Invert service discovery dependencies (#7701)

This also fixes a bug in query_log_file, which is now resolved relative to the config file, like all other paths.

Signed-off-by: Andy Bursavich <abursavich@gmail.com>
This commit is contained in: parent 274dce9de2, commit 4e6a94a27d
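For context on the query_log_file fix, here is a minimal sketch of the new path resolution; the `joinDir` helper below is illustrative only and merely mirrors the `config.JoinDir` semantics used later in this diff, it is not the actual Prometheus code:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// joinDir mirrors the intended config.JoinDir behavior: relative paths are
// joined with dir, absolute (or empty) paths are returned unchanged.
func joinDir(dir, path string) string {
	if path == "" || filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(dir, path)
}

func main() {
	// With the config at /etc/prometheus/prometheus.yml and
	// `query_log_file: query.log`, the log now lands next to the config file.
	fmt.Println(joinDir("/etc/prometheus", "query.log"))      // /etc/prometheus/query.log
	fmt.Println(joinDir("/etc/prometheus", "/var/log/q.log")) // absolute path unchanged
}
```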
@@ -53,7 +53,6 @@ import (
 	promlogflag "github.com/prometheus/common/promlog/flag"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
-	sd_config "github.com/prometheus/prometheus/discovery/config"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/logging"
@@ -67,6 +66,8 @@ import (
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/prometheus/prometheus/web"
+
+	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 )

 var (
@@ -466,9 +467,9 @@ func main() {
 		}, {
 			name: "scrape_sd",
 			reloader: func(cfg *config.Config) error {
-				c := make(map[string]sd_config.ServiceDiscoveryConfig)
+				c := make(map[string]discovery.Configs)
 				for _, v := range cfg.ScrapeConfigs {
-					c[v.JobName] = v.ServiceDiscoveryConfig
+					c[v.JobName] = v.ServiceDiscoveryConfigs
 				}
 				return discoveryManagerScrape.ApplyConfig(c)
 			},
@@ -478,9 +479,9 @@ func main() {
 		}, {
 			name: "notify_sd",
 			reloader: func(cfg *config.Config) error {
-				c := make(map[string]sd_config.ServiceDiscoveryConfig)
+				c := make(map[string]discovery.Configs)
 				for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
-					c[k] = v.ServiceDiscoveryConfig
+					c[k] = v.ServiceDiscoveryConfigs
 				}
 				return discoveryManagerNotify.ApplyConfig(c)
 			},
@@ -40,7 +40,11 @@ import (
 	"gopkg.in/alecthomas/kingpin.v2"

 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery/file"
+	"github.com/prometheus/prometheus/discovery/kubernetes"
 	"github.com/prometheus/prometheus/pkg/rulefmt"
+
+	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 )

 func main() {
@@ -263,24 +267,25 @@ func checkConfig(filename string) ([]string, error) {
 			return nil, err
 		}

-		for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
-			if err := checkTLSConfig(kd.HTTPClientConfig.TLSConfig); err != nil {
-				return nil, err
-			}
-		}
-
-		for _, filesd := range scfg.ServiceDiscoveryConfig.FileSDConfigs {
-			for _, file := range filesd.Files {
-				files, err := filepath.Glob(file)
-				if err != nil {
-					return nil, err
-				}
-				if len(files) != 0 {
-					// There was at least one match for the glob and we can assume checkFileExists
-					// for all matches would pass, we can continue the loop.
-					continue
-				}
-				fmt.Printf("  WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
-			}
-		}
+		for _, c := range scfg.ServiceDiscoveryConfigs {
+			switch c := c.(type) {
+			case *kubernetes.SDConfig:
+				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig); err != nil {
+					return nil, err
+				}
+			case *file.SDConfig:
+				for _, file := range c.Files {
+					files, err := filepath.Glob(file)
+					if err != nil {
+						return nil, err
+					}
+					if len(files) != 0 {
+						// There was at least one match for the glob and we can assume checkFileExists
+						// for all matches would pass, we can continue the loop.
+						continue
+					}
+					fmt.Printf("  WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
+				}
+			}
+		}
 	}
config/config.go (232 changed lines)
@@ -23,11 +23,11 @@ import (
 	"time"

 	"github.com/pkg/errors"
-	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	yaml "gopkg.in/yaml.v2"

-	sd_config "github.com/prometheus/prometheus/discovery/config"
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/relabel"
 )
@@ -48,7 +48,6 @@ func Load(s string) (*Config, error) {
 	if err != nil {
 		return nil, err
 	}
-	cfg.original = s
 	return cfg, nil
 }

@@ -62,7 +61,7 @@ func LoadFile(filename string) (*Config, error) {
 	if err != nil {
 		return nil, errors.Wrapf(err, "parsing YAML file %s", filename)
 	}
-	resolveFilepaths(filepath.Dir(filename), cfg)
+	cfg.SetDirectory(filepath.Dir(filename))
 	return cfg, nil
 }

@@ -137,80 +136,23 @@ type Config struct {
 	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
 	RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
-
-	// original is the input from which the config was parsed.
-	original string
 }

-// resolveFilepaths joins all relative paths in a configuration
-// with a given base directory.
-func resolveFilepaths(baseDir string, cfg *Config) {
-	join := func(fp string) string {
-		if len(fp) > 0 && !filepath.IsAbs(fp) {
-			fp = filepath.Join(baseDir, fp)
-		}
-		return fp
-	}
-
-	for i, rf := range cfg.RuleFiles {
-		cfg.RuleFiles[i] = join(rf)
-	}
-
-	tlsPaths := func(cfg *config_util.TLSConfig) {
-		cfg.CAFile = join(cfg.CAFile)
-		cfg.CertFile = join(cfg.CertFile)
-		cfg.KeyFile = join(cfg.KeyFile)
-	}
-	clientPaths := func(scfg *config_util.HTTPClientConfig) {
-		if scfg.BasicAuth != nil {
-			scfg.BasicAuth.PasswordFile = join(scfg.BasicAuth.PasswordFile)
-		}
-		scfg.BearerTokenFile = join(scfg.BearerTokenFile)
-		tlsPaths(&scfg.TLSConfig)
-	}
-	sdPaths := func(cfg *sd_config.ServiceDiscoveryConfig) {
-		for _, kcfg := range cfg.KubernetesSDConfigs {
-			clientPaths(&kcfg.HTTPClientConfig)
-		}
-		for _, mcfg := range cfg.MarathonSDConfigs {
-			mcfg.AuthTokenFile = join(mcfg.AuthTokenFile)
-			clientPaths(&mcfg.HTTPClientConfig)
-		}
-		for _, consulcfg := range cfg.ConsulSDConfigs {
-			tlsPaths(&consulcfg.TLSConfig)
-		}
-		for _, digitaloceancfg := range cfg.DigitalOceanSDConfigs {
-			clientPaths(&digitaloceancfg.HTTPClientConfig)
-		}
-		for _, dockerswarmcfg := range cfg.DockerSwarmSDConfigs {
-			clientPaths(&dockerswarmcfg.HTTPClientConfig)
-		}
-		for _, cfg := range cfg.OpenstackSDConfigs {
-			tlsPaths(&cfg.TLSConfig)
-		}
-		for _, cfg := range cfg.TritonSDConfigs {
-			tlsPaths(&cfg.TLSConfig)
-		}
-		for _, filecfg := range cfg.FileSDConfigs {
-			for i, fn := range filecfg.Files {
-				filecfg.Files[i] = join(fn)
-			}
-		}
-	}
-
-	for _, cfg := range cfg.ScrapeConfigs {
-		clientPaths(&cfg.HTTPClientConfig)
-		sdPaths(&cfg.ServiceDiscoveryConfig)
-	}
-	for _, cfg := range cfg.AlertingConfig.AlertmanagerConfigs {
-		clientPaths(&cfg.HTTPClientConfig)
-		sdPaths(&cfg.ServiceDiscoveryConfig)
-	}
-	for _, cfg := range cfg.RemoteReadConfigs {
-		clientPaths(&cfg.HTTPClientConfig)
-	}
-	for _, cfg := range cfg.RemoteWriteConfigs {
-		clientPaths(&cfg.HTTPClientConfig)
-	}
-}
+// SetDirectory joins any relative file paths with dir.
+func (c *Config) SetDirectory(dir string) {
+	c.GlobalConfig.SetDirectory(dir)
+	c.AlertingConfig.SetDirectory(dir)
+	for i, file := range c.RuleFiles {
+		c.RuleFiles[i] = config.JoinDir(dir, file)
+	}
+	for _, c := range c.ScrapeConfigs {
+		c.SetDirectory(dir)
+	}
+	for _, c := range c.RemoteWriteConfigs {
+		c.SetDirectory(dir)
+	}
+	for _, c := range c.RemoteReadConfigs {
+		c.SetDirectory(dir)
+	}
+}
@@ -311,6 +253,11 @@ type GlobalConfig struct {
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
 }

+// SetDirectory joins any relative file paths with dir.
+func (c *GlobalConfig) SetDirectory(dir string) {
+	c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	// Create a clean global config as the previous one was already populated
@@ -388,8 +335,8 @@ type ScrapeConfig struct {
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
-	ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
-	HTTPClientConfig       config_util.HTTPClientConfig     `yaml:",inline"`
+	ServiceDiscoveryConfigs discovery.Configs       `yaml:"-"`
+	HTTPClientConfig        config.HTTPClientConfig `yaml:",inline"`

 	// List of target relabel configurations.
 	RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
@@ -397,12 +344,16 @@ type ScrapeConfig struct {
 	MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"`
 }

+// SetDirectory joins any relative file paths with dir.
+func (c *ScrapeConfig) SetDirectory(dir string) {
+	c.ServiceDiscoveryConfigs.SetDirectory(dir)
+	c.HTTPClientConfig.SetDirectory(dir)
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	*c = DefaultScrapeConfig
-	type plain ScrapeConfig
-	err := unmarshal((*plain)(c))
-	if err != nil {
+	if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
 		return err
 	}
 	if len(c.JobName) == 0 {
@ -416,21 +367,10 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer.
|
||||
// We cannot make it a pointer as the parser panics for inlined pointer structs.
|
||||
// Thus we just do its validation here.
|
||||
if err := c.ServiceDiscoveryConfig.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for users putting URLs in target groups.
|
||||
if len(c.RelabelConfigs) == 0 {
|
||||
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
|
||||
for _, t := range tg.Targets {
|
||||
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -445,21 +385,27 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Add index to the static config target groups for unique identification
|
||||
// within scrape pool.
|
||||
for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
|
||||
tg.Source = fmt.Sprintf("%d", i)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalYAML implements the yaml.Marshaler interface.
|
||||
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
|
||||
return discovery.MarshalYAMLWithInlineConfigs(c)
|
||||
}
|
||||
|
||||
// AlertingConfig configures alerting and alertmanager related configs.
|
||||
type AlertingConfig struct {
|
||||
AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"`
|
||||
AlertmanagerConfigs AlertmanagerConfigs `yaml:"alertmanagers,omitempty"`
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *AlertingConfig) SetDirectory(dir string) {
|
||||
for _, c := range c.AlertmanagerConfigs {
|
||||
c.SetDirectory(dir)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// Create a clean global config as the previous one was already populated
|
||||
|
@ -529,8 +475,8 @@ type AlertmanagerConfig struct {
|
|||
// We cannot do proper Go type embedding below as the parser will then parse
|
||||
// values arbitrarily into the overflow maps of further-down types.
|
||||
|
||||
ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
|
||||
// The URL scheme to use when talking to Alertmanagers.
|
||||
Scheme string `yaml:"scheme,omitempty"`
|
||||
|
@ -546,11 +492,16 @@ type AlertmanagerConfig struct {
|
|||
RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *AlertmanagerConfig) SetDirectory(dir string) {
|
||||
c.ServiceDiscoveryConfigs.SetDirectory(dir)
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultAlertmanagerConfig
|
||||
type plain AlertmanagerConfig
|
||||
if err := unmarshal((*plain)(c)); err != nil {
|
||||
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -561,21 +512,10 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
|
|||
return err
|
||||
}
|
||||
|
||||
// The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer.
|
||||
// We cannot make it a pointer as the parser panics for inlined pointer structs.
|
||||
// Thus we just do its validation here.
|
||||
if err := c.ServiceDiscoveryConfig.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for users putting URLs in target groups.
|
||||
if len(c.RelabelConfigs) == 0 {
|
||||
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
|
||||
for _, t := range tg.Targets {
|
||||
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -585,12 +525,28 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
|
|||
}
|
||||
}
|
||||
|
||||
// Add index to the static config target groups for unique identification
|
||||
// within scrape pool.
|
||||
for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
|
||||
tg.Source = fmt.Sprintf("%d", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalYAML implements the yaml.Marshaler interface.
|
||||
func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) {
|
||||
return discovery.MarshalYAMLWithInlineConfigs(c)
|
||||
}
|
||||
|
||||
func checkStaticTargets(configs discovery.Configs) error {
|
||||
for _, cfg := range configs {
|
||||
sc, ok := cfg.(discovery.StaticConfig)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, tg := range sc {
|
||||
for _, t := range tg.Targets {
|
||||
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -603,29 +559,22 @@ func CheckTargetAddress(address model.LabelValue) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// ClientCert contains client cert credentials.
|
||||
type ClientCert struct {
|
||||
Cert string `yaml:"cert"`
|
||||
Key config_util.Secret `yaml:"key"`
|
||||
}
|
||||
|
||||
// FileSDConfig is the configuration for file based discovery.
|
||||
type FileSDConfig struct {
|
||||
Files []string `yaml:"files"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
}
|
||||
|
||||
// RemoteWriteConfig is the configuration for writing to remote storage.
|
||||
type RemoteWriteConfig struct {
|
||||
URL *config_util.URL `yaml:"url"`
|
||||
URL *config.URL `yaml:"url"`
|
||||
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
|
||||
WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
|
||||
// We cannot do proper Go type embedding below as the parser will then parse
|
||||
// values arbitrarily into the overflow maps of further-down types.
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *RemoteWriteConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
|
@ -676,20 +625,25 @@ type QueueConfig struct {
|
|||
|
||||
// RemoteReadConfig is the configuration for reading from remote storage.
|
||||
type RemoteReadConfig struct {
|
||||
URL *config_util.URL `yaml:"url"`
|
||||
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
|
||||
ReadRecent bool `yaml:"read_recent,omitempty"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
URL *config.URL `yaml:"url"`
|
||||
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
|
||||
ReadRecent bool `yaml:"read_recent,omitempty"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
|
||||
// We cannot do proper Go type embedding below as the parser will then parse
|
||||
// values arbitrarily into the overflow maps of further-down types.
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
|
||||
// RequiredMatchers is an optional list of equality matchers which have to
|
||||
// be present in a selector to query the remote read endpoint.
|
||||
RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"`
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *RemoteReadConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultRemoteReadConfig
|
||||
|
|
|
@ -24,5 +24,4 @@ var ruleFilesExpectedConf = &Config{
|
|||
"testdata/rules/second.rules",
|
||||
"/absolute/third.rules",
|
||||
},
|
||||
original: "",
|
||||
}
|
||||
|
|
|
@ -23,12 +23,12 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/azure"
|
||||
sd_config "github.com/prometheus/prometheus/discovery/config"
|
||||
"github.com/prometheus/prometheus/discovery/consul"
|
||||
"github.com/prometheus/prometheus/discovery/digitalocean"
|
||||
"github.com/prometheus/prometheus/discovery/dns"
|
||||
|
@ -46,12 +46,12 @@ import (
|
|||
"github.com/prometheus/prometheus/util/testutil"
|
||||
)
|
||||
|
||||
func mustParseURL(u string) *config_util.URL {
|
||||
func mustParseURL(u string) *config.URL {
|
||||
parsed, err := url.Parse(u)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &config_util.URL{URL: parsed}
|
||||
return &config.URL{URL: parsed}
|
||||
}
|
||||
|
||||
var expectedConf = &Config{
|
||||
|
@ -93,8 +93,8 @@ var expectedConf = &Config{
|
|||
RemoteTimeout: model.Duration(30 * time.Second),
|
||||
QueueConfig: DefaultQueueConfig,
|
||||
Name: "rw_tls",
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
|
@ -115,8 +115,8 @@ var expectedConf = &Config{
|
|||
ReadRecent: false,
|
||||
Name: "read_special",
|
||||
RequiredMatchers: model.LabelSet{"job": "special"},
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
|
@ -136,12 +136,20 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
BearerTokenFile: filepath.FromSlash("testdata/valid_token_file"),
|
||||
},
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&file.SDConfig{
|
||||
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
|
||||
RefreshInterval: model.Duration(10 * time.Minute),
|
||||
},
|
||||
&file.SDConfig{
|
||||
Files: []string{"testdata/bar/*.yaml"},
|
||||
RefreshInterval: model.Duration(5 * time.Minute),
|
||||
},
|
||||
discovery.StaticConfig{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: "localhost:9090"},
|
||||
|
@ -154,17 +162,6 @@ var expectedConf = &Config{
|
|||
Source: "0",
|
||||
},
|
||||
},
|
||||
|
||||
FileSDConfigs: []*file.SDConfig{
|
||||
{
|
||||
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
|
||||
RefreshInterval: model.Duration(10 * time.Minute),
|
||||
},
|
||||
{
|
||||
Files: []string{"testdata/bar/*.yaml"},
|
||||
RefreshInterval: model.Duration(5 * time.Minute),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
RelabelConfigs: []*relabel.Config{
|
||||
|
@ -206,8 +203,8 @@ var expectedConf = &Config{
|
|||
ScrapeTimeout: model.Duration(5 * time.Second),
|
||||
SampleLimit: 1000,
|
||||
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
BasicAuth: &config_util.BasicAuth{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
BasicAuth: &config.BasicAuth{
|
||||
Username: "admin_name",
|
||||
Password: "multiline\nmysecret\ntest",
|
||||
},
|
||||
|
@ -215,23 +212,21 @@ var expectedConf = &Config{
|
|||
MetricsPath: "/my_path",
|
||||
Scheme: "https",
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
DNSSDConfigs: []*dns.SDConfig{
|
||||
{
|
||||
Names: []string{
|
||||
"first.dns.address.domain.com",
|
||||
"second.dns.address.domain.com",
|
||||
},
|
||||
RefreshInterval: model.Duration(15 * time.Second),
|
||||
Type: "SRV",
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&dns.SDConfig{
|
||||
Names: []string{
|
||||
"first.dns.address.domain.com",
|
||||
"second.dns.address.domain.com",
|
||||
},
|
||||
{
|
||||
Names: []string{
|
||||
"first.dns.address.domain.com",
|
||||
},
|
||||
RefreshInterval: model.Duration(30 * time.Second),
|
||||
Type: "SRV",
|
||||
RefreshInterval: model.Duration(15 * time.Second),
|
||||
Type: "SRV",
|
||||
},
|
||||
&dns.SDConfig{
|
||||
Names: []string{
|
||||
"first.dns.address.domain.com",
|
||||
},
|
||||
RefreshInterval: model.Duration(30 * time.Second),
|
||||
Type: "SRV",
|
||||
},
|
||||
},
|
||||
|
||||
|
@ -298,24 +293,22 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
ConsulSDConfigs: []*consul.SDConfig{
|
||||
{
|
||||
Server: "localhost:1234",
|
||||
Token: "mysecret",
|
||||
Services: []string{"nginx", "cache", "mysql"},
|
||||
ServiceTags: []string{"canary", "v1"},
|
||||
NodeMeta: map[string]string{"rack": "123"},
|
||||
TagSeparator: consul.DefaultSDConfig.TagSeparator,
|
||||
Scheme: "https",
|
||||
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
|
||||
AllowStale: true,
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&consul.SDConfig{
|
||||
Server: "localhost:1234",
|
||||
Token: "mysecret",
|
||||
Services: []string{"nginx", "cache", "mysql"},
|
||||
ServiceTags: []string{"canary", "v1"},
|
||||
NodeMeta: map[string]string{"rack": "123"},
|
||||
TagSeparator: consul.DefaultSDConfig.TagSeparator,
|
||||
Scheme: "https",
|
||||
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
|
||||
AllowStale: true,
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -341,8 +334,8 @@ var expectedConf = &Config{
|
|||
MetricsPath: "/metrics",
|
||||
Scheme: "http",
|
||||
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
|
@ -360,23 +353,21 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
KubernetesSDConfigs: []*kubernetes.SDConfig{
|
||||
{
|
||||
APIServer: kubernetesSDHostURL(),
|
||||
Role: kubernetes.RoleEndpoint,
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
BasicAuth: &config_util.BasicAuth{
|
||||
Username: "myusername",
|
||||
Password: "mysecret",
|
||||
},
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&kubernetes.SDConfig{
|
||||
APIServer: kubernetesSDHostURL(),
|
||||
Role: kubernetes.RoleEndpoint,
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
BasicAuth: &config.BasicAuth{
|
||||
Username: "myusername",
|
||||
Password: "mysecret",
|
||||
},
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
|
||||
},
|
||||
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -389,22 +380,20 @@ var expectedConf = &Config{
|
|||
|
||||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
BasicAuth: &config_util.BasicAuth{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
BasicAuth: &config.BasicAuth{
|
||||
Username: "myusername",
|
||||
PasswordFile: filepath.FromSlash("testdata/valid_password_file"),
|
||||
},
|
||||
},
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
KubernetesSDConfigs: []*kubernetes.SDConfig{
|
||||
{
|
||||
APIServer: kubernetesSDHostURL(),
|
||||
Role: kubernetes.RoleEndpoint,
|
||||
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
|
||||
Names: []string{
|
||||
"default",
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&kubernetes.SDConfig{
|
||||
APIServer: kubernetesSDHostURL(),
|
||||
Role: kubernetes.RoleEndpoint,
|
||||
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
|
||||
Names: []string{
|
||||
"default",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -420,19 +409,17 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
MarathonSDConfigs: []*marathon.SDConfig{
|
||||
{
|
||||
Servers: []string{
|
||||
"https://marathon.example.com:443",
|
||||
},
|
||||
RefreshInterval: model.Duration(30 * time.Second),
|
||||
AuthToken: config_util.Secret("mysecret"),
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&marathon.SDConfig{
|
||||
Servers: []string{
|
||||
"https://marathon.example.com:443",
|
||||
},
|
||||
RefreshInterval: model.Duration(30 * time.Second),
|
||||
AuthToken: "mysecret",
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -448,24 +435,22 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
EC2SDConfigs: []*ec2.SDConfig{
|
||||
{
|
||||
Region: "us-east-1",
|
||||
AccessKey: "access",
|
||||
SecretKey: "mysecret",
|
||||
Profile: "profile",
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
Port: 80,
|
||||
Filters: []*ec2.Filter{
|
||||
{
|
||||
Name: "tag:environment",
|
||||
Values: []string{"prod"},
|
||||
},
|
||||
{
|
||||
Name: "tag:service",
|
||||
Values: []string{"web", "db"},
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&ec2.SDConfig{
|
||||
Region: "us-east-1",
|
||||
AccessKey: "access",
|
||||
SecretKey: "mysecret",
|
||||
Profile: "profile",
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
Port: 80,
|
||||
Filters: []*ec2.Filter{
|
||||
{
|
||||
Name: "tag:environment",
|
||||
Values: []string{"prod"},
|
||||
},
|
||||
{
|
||||
Name: "tag:service",
|
||||
Values: []string{"web", "db"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -481,18 +466,16 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
AzureSDConfigs: []*azure.SDConfig{
|
||||
{
|
||||
Environment: "AzurePublicCloud",
|
||||
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
|
||||
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
|
||||
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
|
||||
ClientSecret: "mysecret",
|
||||
AuthenticationMethod: "OAuth",
|
||||
RefreshInterval: model.Duration(5 * time.Minute),
|
||||
Port: 9100,
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&azure.SDConfig{
|
||||
Environment: "AzurePublicCloud",
|
||||
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
|
||||
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
|
||||
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
|
||||
ClientSecret: "mysecret",
|
||||
AuthenticationMethod: "OAuth",
|
||||
RefreshInterval: model.Duration(5 * time.Minute),
|
||||
Port: 9100,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -506,13 +489,11 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
NerveSDConfigs: []*zookeeper.NerveSDConfig{
|
||||
{
|
||||
Servers: []string{"localhost"},
|
||||
Paths: []string{"/monitoring"},
|
||||
Timeout: model.Duration(10 * time.Second),
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&zookeeper.NerveSDConfig{
|
||||
Servers: []string{"localhost"},
|
||||
Paths: []string{"/monitoring"},
|
||||
Timeout: model.Duration(10 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -526,8 +507,8 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
discovery.StaticConfig{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: "localhost:9090"},
|
||||
|
@ -547,8 +528,8 @@ var expectedConf = &Config{
|
|||
MetricsPath: "/federate",
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
discovery.StaticConfig{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: "localhost:9090"},
|
||||
|
@ -568,8 +549,8 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
discovery.StaticConfig{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: "localhost:9090"},
|
||||
|
@ -589,20 +570,18 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
TritonSDConfigs: []*triton.SDConfig{
|
||||
{
|
||||
Account: "testAccount",
|
||||
Role: "container",
|
||||
DNSSuffix: "triton.example.com",
|
||||
Endpoint: "triton.example.com",
|
||||
Port: 9163,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
Version: 1,
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
CertFile: "testdata/valid_cert_file",
|
||||
KeyFile: "testdata/valid_key_file",
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&triton.SDConfig{
|
||||
Account: "testAccount",
|
||||
Role: "container",
|
||||
DNSSuffix: "triton.example.com",
|
||||
Endpoint: "triton.example.com",
|
||||
Port: 9163,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
Version: 1,
|
||||
TLSConfig: config.TLSConfig{
|
||||
CertFile: "testdata/valid_cert_file",
|
||||
KeyFile: "testdata/valid_key_file",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -617,15 +596,13 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
DigitalOceanSDConfigs: []*digitalocean.SDConfig{
|
||||
{
|
||||
HTTPClientConfig: config_util.HTTPClientConfig{
|
||||
BearerToken: "abcdef",
|
||||
},
|
||||
Port: 80,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&digitalocean.SDConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
BearerToken: "abcdef",
|
||||
},
|
||||
Port: 80,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -639,14 +616,12 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
DockerSwarmSDConfigs: []*dockerswarm.SDConfig{
|
||||
{
|
||||
Host: "http://127.0.0.1:2375",
|
||||
Role: "nodes",
|
||||
Port: 80,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
&dockerswarm.SDConfig{
|
||||
Host: "http://127.0.0.1:2375",
|
||||
Role: "nodes",
|
||||
Port: 80,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -660,21 +635,17 @@ var expectedConf = &Config{
|
|||
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||
Scheme: DefaultScrapeConfig.Scheme,
|
||||
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
OpenstackSDConfigs: []*openstack.SDConfig{
|
||||
{
|
||||
Role: "instance",
|
||||
Region: "RegionOne",
|
||||
Port: 80,
|
||||
Availability: "public",
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
TLSConfig: config_util.TLSConfig{
|
||||
CAFile: "testdata/valid_ca_file",
|
||||
CertFile: "testdata/valid_cert_file",
|
||||
KeyFile: "testdata/valid_key_file",
|
||||
},
|
||||
},
|
||||
},
|
||||
ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
|
||||
Role: "instance",
|
||||
Region: "RegionOne",
|
||||
Port: 80,
|
||||
Availability: "public",
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: "testdata/valid_ca_file",
|
||||
CertFile: "testdata/valid_cert_file",
|
||||
KeyFile: "testdata/valid_key_file",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -684,8 +655,8 @@ var expectedConf = &Config{
|
|||
Scheme: "https",
|
||||
Timeout: model.Duration(10 * time.Second),
|
||||
APIVersion: AlertmanagerAPIVersionV1,
|
||||
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
ServiceDiscoveryConfigs: discovery.Configs{
|
||||
discovery.StaticConfig{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: "1.2.3.4:9093"},
|
||||
|
@ -699,7 +670,19 @@ var expectedConf = &Config{
|
|||
},
|
||||
},
|
||||
},
|
||||
original: "",
|
||||
}
|
||||
|
||||
func TestYAMLRoundtrip(t *testing.T) {
|
||||
want, err := LoadFile("testdata/roundtrip.good.yml")
|
||||
testutil.Ok(t, err)
|
||||
|
||||
out, err := yaml.Marshal(want)
|
||||
|
||||
testutil.Ok(t, err)
|
||||
got := &Config{}
|
||||
testutil.Ok(t, yaml.UnmarshalStrict(out, got))
|
||||
|
||||
testutil.Equals(t, want, got)
|
||||
}
|
||||
|
||||
func TestLoadConfig(t *testing.T) {
|
||||
|
@ -710,8 +693,6 @@ func TestLoadConfig(t *testing.T) {
|
|||
|
||||
c, err := LoadFile("testdata/conf.good.yml")
|
||||
testutil.Ok(t, err)
|
||||
|
||||
expectedConf.original = c.original
|
||||
testutil.Equals(t, expectedConf, c)
|
||||
}
|
||||
|
||||
|
@ -745,8 +726,6 @@ func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
|
|||
// Parse a valid file that sets a rule files with an absolute path
|
||||
c, err := LoadFile(ruleFilesConfigFile)
|
||||
testutil.Ok(t, err)
|
||||
|
||||
ruleFilesExpectedConf.original = c.original
|
||||
testutil.Equals(t, ruleFilesExpectedConf, c)
|
||||
}
|
||||
|
||||
|
@ -834,7 +813,7 @@ var expectedErrors = []struct {
|
|||
errMsg: "invalid rule file path",
|
||||
}, {
|
||||
filename: "unknown_attr.bad.yml",
|
||||
errMsg: "field consult_sd_configs not found in type config.plain",
|
||||
errMsg: "field consult_sd_configs not found in type",
|
||||
}, {
|
||||
filename: "bearertoken.bad.yml",
|
||||
errMsg: "at most one of bearer_token & bearer_token_file must be configured",
|
||||
|
@ -1022,11 +1001,10 @@ func TestEmptyGlobalBlock(t *testing.T) {
|
|||
c, err := Load("global:\n")
|
||||
testutil.Ok(t, err)
|
||||
exp := DefaultConfig
|
||||
exp.original = "global:\n"
|
||||
testutil.Equals(t, exp, *c)
|
||||
}
|
||||
|
||||
func kubernetesSDHostURL() config_util.URL {
|
||||
func kubernetesSDHostURL() config.URL {
|
||||
tURL, _ := url.Parse("https://localhost:1234")
|
||||
return config_util.URL{URL: tURL}
|
||||
return config.URL{URL: tURL}
|
||||
}
|
||||
|
|
|
@ -22,5 +22,4 @@ var ruleFilesExpectedConf = &Config{
|
|||
"testdata\\rules\\second.rules",
|
||||
"c:\\absolute\\third.rules",
|
||||
},
|
||||
original: "",
|
||||
}
|
||||
|
|
config/testdata/roundtrip.good.yml (new file, 143 lines)
|
@ -0,0 +1,143 @@
|
|||
alerting:
|
||||
alertmanagers:
|
||||
- scheme: https
|
||||
|
||||
file_sd_configs:
|
||||
- files:
|
||||
- foo/*.slow.json
|
||||
- foo/*.slow.yml
|
||||
refresh_interval: 10m
|
||||
- files:
|
||||
- bar/*.yaml
|
||||
|
||||
static_configs:
|
||||
- targets:
|
||||
- 1.2.3.4:9093
|
||||
- 1.2.3.5:9093
|
||||
- 1.2.3.6:9093
|
||||
|
||||
scrape_configs:
|
||||
|
||||
- job_name: foo
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:9090
|
||||
- localhost:9191
|
||||
labels:
|
||||
my: label
|
||||
your: label
|
||||
|
||||
- job_name: bar
|
||||
|
||||
azure_sd_configs:
|
||||
- environment: AzurePublicCloud
|
||||
authentication_method: OAuth
|
||||
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
|
||||
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
|
||||
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
|
||||
client_secret: <secret>
|
||||
port: 9100
|
||||
|
||||
consul_sd_configs:
|
||||
- server: localhost:1234
|
||||
token: <secret>
|
||||
services: [nginx, cache, mysql]
|
||||
tags: [canary, v1]
|
||||
node_meta:
|
||||
rack: "123"
|
||||
allow_stale: true
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: valid_ca_file
|
||||
cert_file: valid_cert_file
|
||||
key_file: valid_key_file
|
||||
|
||||
digitalocean_sd_configs:
|
||||
- bearer_token: <secret>
|
||||
|
||||
dockerswarm_sd_configs:
|
||||
- host: http://127.0.0.1:2375
|
||||
role: nodes
|
||||
|
||||
dns_sd_configs:
|
||||
- refresh_interval: 15s
|
||||
names:
|
||||
- first.dns.address.domain.com
|
||||
- second.dns.address.domain.com
|
||||
- names:
|
||||
- first.dns.address.domain.com
|
||||
|
||||
ec2_sd_configs:
|
||||
- region: us-east-1
|
||||
access_key: access
|
||||
secret_key: <secret>
|
||||
profile: profile
|
||||
filters:
|
||||
- name: tag:environment
|
||||
values:
|
||||
- prod
|
||||
- name: tag:service
|
||||
values:
|
||||
- web
|
||||
- db
|
||||
|
||||
file_sd_configs:
|
||||
- files:
|
||||
- single/file.yml
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
api_server: https://localhost:1234
|
||||
tls_config:
|
||||
cert_file: valid_cert_file
|
||||
key_file: valid_key_file
|
||||
basic_auth:
|
||||
username: username
|
||||
password: <secret>
|
||||
- role: endpoints
|
||||
api_server: https://localhost:1234
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
basic_auth:
|
||||
username: username
|
||||
password_file: valid_password_file
|
||||
|
||||
marathon_sd_configs:
|
||||
- servers:
|
||||
- https://marathon.example.com:443
|
||||
auth_token: <secret>
|
||||
tls_config:
|
||||
cert_file: valid_cert_file
|
||||
key_file: valid_key_file
|
||||
|
||||
nerve_sd_configs:
|
||||
- servers:
|
||||
- localhost
|
||||
paths:
|
||||
- /monitoring
|
||||
|
||||
openstack_sd_configs:
|
||||
- role: instance
|
||||
region: RegionOne
|
||||
port: 80
|
||||
refresh_interval: 1m
|
||||
tls_config:
|
||||
ca_file: valid_ca_file
|
||||
cert_file: valid_cert_file
|
||||
key_file: valid_key_file
|
||||
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost:9093
|
||||
|
||||
triton_sd_configs:
|
||||
- account: testAccount
|
||||
dns_suffix: triton.example.com
|
||||
endpoint: triton.example.com
|
||||
port: 9163
|
||||
refresh_interval: 1m
|
||||
version: 1
|
||||
tls_config:
|
||||
cert_file: valid_cert_file
|
||||
key_file: valid_key_file
|
|
@@ -146,85 +146,115 @@ both cases.

For example if we had a discovery mechanism and it retrieves the following groups:

-```
+```go
[]targetgroup.Group{
	{
		Targets: []model.LabelSet{
			{
				"__instance__": "10.11.150.1:7870",
				"hostname": "demo-target-1",
				"test": "simple-test",
			},
			{
				"__instance__": "10.11.150.4:7870",
				"hostname": "demo-target-2",
				"test": "simple-test",
			},
		},
-		Labels: map[LabelName][LabelValue] {
+		Labels: model.LabelSet{
			"job": "mysql",
		},
		"Source": "file1",
	},
	{
		Targets: []model.LabelSet{
			{
				"__instance__": "10.11.122.11:6001",
				"hostname": "demo-postgres-1",
				"test": "simple-test",
			},
			{
				"__instance__": "10.11.122.15:6001",
				"hostname": "demo-postgres-2",
				"test": "simple-test",
			},
		},
-		Labels: map[LabelName][LabelValue] {
+		Labels: model.LabelSet{
			"job": "postgres",
		},
		"Source": "file2",
	},
}
```

Here there are two target groups one group with source `file1` and another with `file2`. The grouping is implementation specific and could even be one target per group. But, one has to make sure every target group sent by an SD instance should have a `Source` which is unique across all the target groups of that SD instance.

In this case, both the target groups are sent down the channel the first time `Run()` is called. Now, for an update, we need to send the whole _changed_ target group down the channel. i.e, if the target with `hostname: demo-postgres-2` goes away, we send:

-```
+```go
&targetgroup.Group{
	Targets: []model.LabelSet{
		{
			"__instance__": "10.11.122.11:6001",
			"hostname": "demo-postgres-1",
			"test": "simple-test",
		},
	},
-	Labels: map[LabelName][LabelValue] {
+	Labels: model.LabelSet{
		"job": "postgres",
	},
	"Source": "file2",
}
```
down the channel.

If all the targets in a group go away, we need to send the target groups with empty `Targets` down the channel. i.e, if all targets with `job: postgres` go away, we send:

-```
+```go
&targetgroup.Group{
	Targets: nil,
	"Source": "file2",
}
```
down the channel.
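To make "send down the channel" concrete, here is a hedged sketch of a Discoverer whose `Run` method pushes such groups; `Discoverer`, `targetgroup.Group`, and `model.LabelSet` are the real types, while `staticDiscoverer` and `exampleGroups` are made up for illustration:

```go
package example

import (
	"context"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// staticDiscoverer is a hypothetical Discoverer that sends a fixed set of
// target groups once and then waits for cancellation.
type staticDiscoverer struct {
	groups []*targetgroup.Group
}

// Run sends the groups on ch once, then blocks until the context is
// canceled. A real implementation would watch its source and re-send
// every target group that changes.
func (d *staticDiscoverer) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	select {
	case ch <- d.groups:
	case <-ctx.Done():
		return
	}
	<-ctx.Done()
}

// exampleGroups mirrors the first group from the documentation above.
func exampleGroups() []*targetgroup.Group {
	return []*targetgroup.Group{{
		Targets: []model.LabelSet{
			{"__instance__": "10.11.150.1:7870", "hostname": "demo-target-1", "test": "simple-test"},
		},
		Labels: model.LabelSet{"job": "mysql"},
		Source: "file1",
	}}
}
```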
+### The Config interface
+
+Now that your service discovery mechanism is ready to discover targets, you must help
+Prometheus discover it. This is done by implementing the `discovery.Config` interface
+and registering it with `discovery.RegisterConfig` in an init function of your package.
+
+```go
+type Config interface {
+	// Name returns the name of the discovery mechanism.
+	Name() string
+
+	// NewDiscoverer returns a Discoverer for the Config
+	// with the given DiscovererOptions.
+	NewDiscoverer(DiscovererOptions) (Discoverer, error)
+}
+
+type DiscovererOptions struct {
+	Logger log.Logger
+}
+```
+
+The value returned by `Name()` should be short, descriptive, lowercase, and unique.
+It's used to tag the provided `Logger` and as the part of the YAML key for your SD
+mechanism's list of configs in `scrape_config` and `alertmanager_config`
+(e.g. `${NAME}_sd_configs`).
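As an illustration of the registration pattern described above (and mirrored by the azure and consul changes later in this diff), a sketch of a hypothetical `mysd` package; only `discovery.Config`, `discovery.RegisterConfig`, `discovery.DiscovererOptions`, and `discovery.Discoverer` are taken from the interface shown here, everything else is a placeholder:

```go
package mysd

import (
	"context"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func init() {
	// Register the config type so that `mysd_sd_configs` sections can be
	// parsed inside scrape_config and alertmanager_config.
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is a hypothetical service discovery configuration.
type SDConfig struct {
	Server          string         `yaml:"server"`
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}

// Name returns the name of the Config; it becomes the `mysd_sd_configs` YAML key.
func (*SDConfig) Name() string { return "mysd" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return &Discovery{conf: c}, nil
}

// Discovery is a stub Discoverer; a real implementation would poll
// c.Server and send target groups on the channel as shown earlier.
type Discovery struct {
	conf *SDConfig
}

func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	<-ctx.Done()
}
```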
### New Service Discovery Check List

Here are some non-obvious parts of adding service discoveries that need to be verified:

-- Check for `nil` SDConfigs in `discovery/config/config.go`.
 - Validate that discovery configs can be DeepEqualled by adding them to
   `config/testdata/conf.good.yml` and to the associated tests.
-- If there is a TLSConfig or HTTPClientConfig, add them to
-  `resolveFilepaths` in `config/config.go`.
+- If the config contains file paths directly or indirectly (e.g. with a TLSConfig or
+  HTTPClientConfig field), then it must implement `config.DirectorySetter`.
+- Import your SD package from `prometheus/discovery/install`. The install package is
+  imported from `main` to register all builtin SD mechanisms.
 - List the service discovery in both `<scrape_config>` and
   `<alertmanager_config>` in `docs/configuration/configuration.md`.
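For the `config.DirectorySetter` item, a sketch of what the hypothetical config above might add if it carried file paths; it assumes `config.TLSConfig.SetDirectory` and `config.JoinDir` from `github.com/prometheus/common/config`, which this diff uses elsewhere, and the struct itself is illustrative:

```go
package mysd

import "github.com/prometheus/common/config"

// SDConfigWithTLS is an illustrative config variant that carries file paths.
type SDConfigWithTLS struct {
	Server    string           `yaml:"server"`
	TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
	TokenFile string           `yaml:"token_file,omitempty"`
}

// SetDirectory implements config.DirectorySetter: relative paths in the
// config are joined with the directory of the loaded configuration file.
func (c *SDConfigWithTLS) SetDirectory(dir string) {
	c.TLSConfig.SetDirectory(dir)
	c.TokenFile = config.JoinDir(dir, c.TokenFile)
}
```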
@@ -33,6 +33,7 @@ import (
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/util/strutil"
@@ -64,6 +65,10 @@ var DefaultSDConfig = SDConfig{
 	AuthenticationMethod: authMethodOAuth,
 }

+func init() {
+	discovery.RegisterConfig(&SDConfig{})
+}
+
 // SDConfig is the configuration for Azure based service discovery.
 type SDConfig struct {
 	Environment string `yaml:"environment,omitempty"`
@@ -76,6 +81,14 @@ type SDConfig struct {
 	AuthenticationMethod string `yaml:"authentication_method,omitempty"`
 }

+// Name returns the name of the Config.
+func (*SDConfig) Name() string { return "azure" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+	return NewDiscovery(c, opts.Logger), nil
+}
+
 func validateAuthParam(param, name string) error {
 	if len(param) == 0 {
 		return errors.Errorf("azure SD configuration requires a %s", name)
|
|
@ -1,147 +0,0 @@
|
|||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/azure"
|
||||
"github.com/prometheus/prometheus/discovery/consul"
|
||||
"github.com/prometheus/prometheus/discovery/digitalocean"
|
||||
"github.com/prometheus/prometheus/discovery/dns"
|
||||
"github.com/prometheus/prometheus/discovery/dockerswarm"
|
||||
"github.com/prometheus/prometheus/discovery/ec2"
|
||||
"github.com/prometheus/prometheus/discovery/file"
|
||||
"github.com/prometheus/prometheus/discovery/gce"
|
||||
"github.com/prometheus/prometheus/discovery/kubernetes"
|
||||
"github.com/prometheus/prometheus/discovery/marathon"
|
||||
"github.com/prometheus/prometheus/discovery/openstack"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/discovery/triton"
|
||||
"github.com/prometheus/prometheus/discovery/zookeeper"
|
||||
)
|
||||
|
||||
// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
|
||||
type ServiceDiscoveryConfig struct {
|
||||
// List of labeled target groups for this job.
|
||||
StaticConfigs []*targetgroup.Group `yaml:"static_configs,omitempty"`
|
||||
// List of DNS service discovery configurations.
|
||||
DNSSDConfigs []*dns.SDConfig `yaml:"dns_sd_configs,omitempty"`
|
||||
// List of file service discovery configurations.
|
||||
FileSDConfigs []*file.SDConfig `yaml:"file_sd_configs,omitempty"`
|
||||
// List of Consul service discovery configurations.
|
||||
ConsulSDConfigs []*consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
|
||||
// List of DigitalOcean service discovery configurations.
|
||||
DigitalOceanSDConfigs []*digitalocean.SDConfig `yaml:"digitalocean_sd_configs,omitempty"`
|
||||
// List of Docker Swarm service discovery configurations.
|
||||
DockerSwarmSDConfigs []*dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"`
|
||||
// List of Serverset service discovery configurations.
|
||||
ServersetSDConfigs []*zookeeper.ServersetSDConfig `yaml:"serverset_sd_configs,omitempty"`
|
||||
// NerveSDConfigs is a list of Nerve service discovery configurations.
|
||||
NerveSDConfigs []*zookeeper.NerveSDConfig `yaml:"nerve_sd_configs,omitempty"`
|
||||
// MarathonSDConfigs is a list of Marathon service discovery configurations.
|
||||
MarathonSDConfigs []*marathon.SDConfig `yaml:"marathon_sd_configs,omitempty"`
|
||||
// List of Kubernetes service discovery configurations.
|
||||
KubernetesSDConfigs []*kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"`
|
||||
// List of GCE service discovery configurations.
|
||||
GCESDConfigs []*gce.SDConfig `yaml:"gce_sd_configs,omitempty"`
|
||||
// List of EC2 service discovery configurations.
|
||||
EC2SDConfigs []*ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"`
|
||||
// List of OpenStack service discovery configurations.
|
||||
OpenstackSDConfigs []*openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"`
|
||||
// List of Azure service discovery configurations.
|
||||
AzureSDConfigs []*azure.SDConfig `yaml:"azure_sd_configs,omitempty"`
|
||||
// List of Triton service discovery configurations.
|
||||
TritonSDConfigs []*triton.SDConfig `yaml:"triton_sd_configs,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates the ServiceDiscoveryConfig.
|
||||
func (c *ServiceDiscoveryConfig) Validate() error {
|
||||
for _, cfg := range c.AzureSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in azure_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.ConsulSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in consul_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.DigitalOceanSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in digitalocean_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.DockerSwarmSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in dockerswarm_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.DNSSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in dns_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.EC2SDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in ec2_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.FileSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in file_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.GCESDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in gce_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.KubernetesSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in kubernetes_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.MarathonSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in marathon_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.NerveSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in nerve_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.OpenstackSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in openstack_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.ServersetSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in serverset_sd_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.StaticConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in static_configs")
|
||||
}
|
||||
}
|
||||
for _, cfg := range c.TritonSDConfigs {
|
||||
if cfg == nil {
|
||||
return errors.New("empty or null section in triton_sd_configs")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestForNilSDConfig(t *testing.T) {
|
||||
// Get all the yaml fields names of the ServiceDiscoveryConfig struct.
|
||||
s := reflect.ValueOf(ServiceDiscoveryConfig{})
|
||||
configType := s.Type()
|
||||
n := s.NumField()
|
||||
fieldsSlice := make([]string, n)
|
||||
for i := 0; i < n; i++ {
|
||||
field := configType.Field(i)
|
||||
tag := field.Tag.Get("yaml")
|
||||
tag = strings.Split(tag, ",")[0]
|
||||
fieldsSlice = append(fieldsSlice, tag)
|
||||
}
|
||||
|
||||
// Unmarshal all possible YAML keys and validate that an error is raised upon a nil
|
||||
// SD config.
|
||||
for _, f := range fieldsSlice {
|
||||
if f == "" {
|
||||
continue
|
||||
}
|
||||
t.Run(f, func(t *testing.T) {
|
||||
c := &ServiceDiscoveryConfig{}
|
||||
err := yaml.Unmarshal([]byte(fmt.Sprintf(`
|
||||
---
|
||||
%s:
|
||||
-
|
||||
`, f)), c)
|
||||
testutil.Ok(t, err)
|
||||
err = c.Validate()
|
||||
testutil.NotOk(t, err)
|
||||
testutil.Equals(t, fmt.Sprintf("empty or null section in %s", f), err.Error())
|
||||
})
|
||||
}
|
||||
}
|
|
@ -28,9 +28,10 @@ import (
|
|||
conntrack "github.com/mwitkow/go-conntrack"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
@ -99,15 +100,21 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
prometheus.MustRegister(rpcFailuresCount)
|
||||
prometheus.MustRegister(rpcDuration)
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for Consul service discovery.
|
||||
type SDConfig struct {
|
||||
Server string `yaml:"server,omitempty"`
|
||||
Token config_util.Secret `yaml:"token,omitempty"`
|
||||
Datacenter string `yaml:"datacenter,omitempty"`
|
||||
TagSeparator string `yaml:"tag_separator,omitempty"`
|
||||
Scheme string `yaml:"scheme,omitempty"`
|
||||
Username string `yaml:"username,omitempty"`
|
||||
Password config_util.Secret `yaml:"password,omitempty"`
|
||||
Server string `yaml:"server,omitempty"`
|
||||
Token config.Secret `yaml:"token,omitempty"`
|
||||
Datacenter string `yaml:"datacenter,omitempty"`
|
||||
TagSeparator string `yaml:"tag_separator,omitempty"`
|
||||
Scheme string `yaml:"scheme,omitempty"`
|
||||
Username string `yaml:"username,omitempty"`
|
||||
Password config.Secret `yaml:"password,omitempty"`
|
||||
|
||||
// See https://www.consul.io/docs/internals/consensus.html#consistency-modes,
|
||||
// stale reads are a lot cheaper and are a necessity if you have >5k targets.
|
||||
|
@ -127,7 +134,20 @@ type SDConfig struct {
|
|||
// Desired node metadata.
|
||||
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
|
||||
|
||||
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
|
||||
TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "consul" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
c.TLSConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
|
@ -144,11 +164,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(rpcFailuresCount)
|
||||
prometheus.MustRegister(rpcDuration)
|
||||
}
|
||||
|
||||
// Discovery retrieves target information from a Consul server
|
||||
// and updates them via watches.
|
||||
type Discovery struct {
|
||||
|
@ -170,7 +185,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
|||
logger = log.NewNopLogger()
|
||||
}
|
||||
|
||||
tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
|
||||
tls, err := config.NewTLSConfig(&conf.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -24,10 +24,11 @@ import (
|
|||
|
||||
"github.com/digitalocean/godo"
|
||||
"github.com/go-kit/kit/log"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/version"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
@ -54,14 +55,31 @@ var DefaultSDConfig = SDConfig{
|
|||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for DigitalOcean based service discovery.
|
||||
type SDConfig struct {
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval"`
|
||||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "digitalocean" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
|
@ -87,7 +105,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
|||
port: conf.Port,
|
||||
}
|
||||
|
||||
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false)
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
discovery/discovery.go (new file, 117 lines)
|
@ -0,0 +1,117 @@
|
|||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package discovery
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
// Discoverer provides information about target groups. It maintains a set
|
||||
// of sources from which TargetGroups can originate. Whenever a discovery provider
|
||||
// detects a potential change, it sends the TargetGroup through its channel.
|
||||
//
|
||||
// Discoverer does not know if an actual change happened.
|
||||
// It does guarantee that it sends the new TargetGroup whenever a change happens.
|
||||
//
|
||||
// Discoverers should initially send a full set of all discoverable TargetGroups.
|
||||
type Discoverer interface {
|
||||
// Run hands a channel to the discovery provider (Consul, DNS, etc.) through which
|
||||
// it can send updated target groups. It must return when the context is canceled.
|
||||
// It should not close the update channel on returning.
|
||||
Run(ctx context.Context, up chan<- []*targetgroup.Group)
|
||||
}
|
||||
|
||||
// DiscovererOptions provides options for a Discoverer.
|
||||
type DiscovererOptions struct {
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
// A Config provides the configuration and constructor for a Discoverer.
|
||||
type Config interface {
|
||||
// Name returns the name of the discovery mechanism.
|
||||
Name() string
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config
|
||||
// with the given DiscovererOptions.
|
||||
NewDiscoverer(DiscovererOptions) (Discoverer, error)
|
||||
}
|
||||
|
||||
// Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling
|
||||
// to represent itself as a mapping of the Config values grouped by their types.
|
||||
type Configs []Config
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *Configs) SetDirectory(dir string) {
|
||||
for _, c := range *c {
|
||||
if v, ok := c.(config.DirectorySetter); ok {
|
||||
v.SetDirectory(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements yaml.Unmarshaler.
|
||||
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
cfgTyp := getConfigType(configsType)
|
||||
cfgPtr := reflect.New(cfgTyp)
|
||||
cfgVal := cfgPtr.Elem()
|
||||
|
||||
if err := unmarshal(cfgPtr.Interface()); err != nil {
|
||||
return replaceYAMLTypeError(err, cfgTyp, configsType)
|
||||
}
|
||||
|
||||
var err error
|
||||
*c, err = readConfigs(cfgVal, 0)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalYAML implements yaml.Marshaler.
|
||||
func (c Configs) MarshalYAML() (interface{}, error) {
|
||||
cfgTyp := getConfigType(configsType)
|
||||
cfgPtr := reflect.New(cfgTyp)
|
||||
cfgVal := cfgPtr.Elem()
|
||||
|
||||
if err := writeConfigs(cfgVal, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfgPtr.Interface(), nil
|
||||
}
|
||||
|
||||
// A StaticConfig is a Config that provides a static list of targets.
|
||||
type StaticConfig []*targetgroup.Group
|
||||
|
||||
// Name returns the name of the service discovery mechanism.
|
||||
func (StaticConfig) Name() string { return "static" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
|
||||
return staticDiscoverer(c), nil
|
||||
}
|
||||
|
||||
type staticDiscoverer []*targetgroup.Group
|
||||
|
||||
func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
|
||||
// TODO: existing implementation closes up chan, but documentation explicitly forbids it...?
|
||||
defer close(up)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case up <- c:
|
||||
}
|
||||
}
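
The Config and Discoverer interfaces above are the entire contract a discovery mechanism has to satisfy under the inverted dependency scheme. A minimal sketch of a third-party mechanism, assuming a hypothetical package and name `example` (the `targets` field and the one-shot Run behaviour are illustrative, not part of this change):

package example

import (
	"context"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func init() {
	// Makes `example_sd_configs` usable in configuration, mirroring the
	// RegisterConfig calls added to each builtin package in this change.
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is a hypothetical configuration holding a fixed target list.
type SDConfig struct {
	Targets []string `yaml:"targets"`
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "example" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return &discoverer{cfg: c}, nil
}

type discoverer struct {
	cfg *SDConfig
}

// Run sends the full target set once, then blocks until the context is canceled.
func (d *discoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
	tg := &targetgroup.Group{Source: "example"}
	for _, t := range d.cfg.Targets {
		tg.Targets = append(tg.Targets, model.LabelSet{model.AddressLabel: model.LabelValue(t)})
	}
	select {
	case up <- []*targetgroup.Group{tg}:
	case <-ctx.Done():
		return
	}
	<-ctx.Done()
}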
|
|
@ -28,6 +28,7 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
@ -65,6 +66,12 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
prometheus.MustRegister(dnsSDLookupFailuresCount)
|
||||
prometheus.MustRegister(dnsSDLookupsCount)
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for DNS based service discovery.
|
||||
type SDConfig struct {
|
||||
Names []string `yaml:"names"`
|
||||
|
@ -73,6 +80,14 @@ type SDConfig struct {
|
|||
Port int `yaml:"port"` // Ignored for SRV records
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "dns" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(*c, opts.Logger), nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
|
@ -96,11 +111,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(dnsSDLookupFailuresCount)
|
||||
prometheus.MustRegister(dnsSDLookupsCount)
|
||||
}
|
||||
|
||||
// Discovery periodically performs DNS-SD requests. It implements
|
||||
// the Discoverer interface.
|
||||
type Discovery struct {
|
||||
|
|
|
@ -22,10 +22,11 @@ import (
|
|||
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/go-kit/kit/log"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/version"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
@ -42,9 +43,13 @@ var DefaultSDConfig = SDConfig{
|
|||
Port: 80,
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for Docker Swarm based service discovery.
|
||||
type SDConfig struct {
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
|
||||
Host string `yaml:"host"`
|
||||
Role string `yaml:"role"`
|
||||
|
@ -53,6 +58,19 @@ type SDConfig struct {
|
|||
RefreshInterval model.Duration `yaml:"refresh_interval"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "dockerswarm" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
|
@ -109,7 +127,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
|||
// unix, which are not supported by the HTTP client. Passing HTTP client
|
||||
// options to the Docker client makes those non-HTTP requests fail.
|
||||
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
|
||||
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false)
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -28,9 +28,10 @@ import (
|
|||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/pkg/errors"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
|
@ -64,6 +65,10 @@ var DefaultSDConfig = SDConfig{
|
|||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// Filter is the configuration for filtering EC2 instances.
|
||||
type Filter struct {
|
||||
Name string `yaml:"name"`
|
||||
|
@ -72,15 +77,23 @@ type Filter struct {
|
|||
|
||||
// SDConfig is the configuration for EC2 based service discovery.
|
||||
type SDConfig struct {
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
Region string `yaml:"region"`
|
||||
AccessKey string `yaml:"access_key,omitempty"`
|
||||
SecretKey config_util.Secret `yaml:"secret_key,omitempty"`
|
||||
Profile string `yaml:"profile,omitempty"`
|
||||
RoleARN string `yaml:"role_arn,omitempty"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
Port int `yaml:"port"`
|
||||
Filters []*Filter `yaml:"filters"`
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
Region string `yaml:"region"`
|
||||
AccessKey string `yaml:"access_key,omitempty"`
|
||||
SecretKey config.Secret `yaml:"secret_key,omitempty"`
|
||||
Profile string `yaml:"profile,omitempty"`
|
||||
RoleARN string `yaml:"role_arn,omitempty"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
Port int `yaml:"port"`
|
||||
Filters []*Filter `yaml:"filters"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "ec2" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger), nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
|
|
|
@ -29,10 +29,12 @@ import (
|
|||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
fsnotify "gopkg.in/fsnotify/fsnotify.v1"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
|
@ -45,12 +47,31 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for file based discovery.
|
||||
type SDConfig struct {
|
||||
Files []string `yaml:"files"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "file" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger), nil
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
for i, file := range c.Files {
|
||||
c.Files[i] = config.JoinDir(dir, file)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/option"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
|
@ -57,6 +58,10 @@ var DefaultSDConfig = SDConfig{
|
|||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for GCE based service discovery.
|
||||
type SDConfig struct {
|
||||
// Project: The Google Cloud Project ID
|
||||
|
@ -76,6 +81,14 @@ type SDConfig struct {
|
|||
TagSeparator string `yaml:"tag_separator,omitempty"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "gce" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(*c, opts.Logger)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
|
|
discovery/install/install.go (new file, 32 lines)
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package install has the side-effect of registering all builtin
|
||||
// service discovery config types.
|
||||
package install
|
||||
|
||||
import (
|
||||
_ "github.com/prometheus/prometheus/discovery/azure" // register azure
|
||||
_ "github.com/prometheus/prometheus/discovery/consul" // register consul
|
||||
_ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean
|
||||
_ "github.com/prometheus/prometheus/discovery/dns" // register dns
|
||||
_ "github.com/prometheus/prometheus/discovery/dockerswarm" // register dockerswarm
|
||||
_ "github.com/prometheus/prometheus/discovery/ec2" // register ec2
|
||||
_ "github.com/prometheus/prometheus/discovery/file" // register file
|
||||
_ "github.com/prometheus/prometheus/discovery/gce" // register gce
|
||||
_ "github.com/prometheus/prometheus/discovery/kubernetes" // register kubernetes
|
||||
_ "github.com/prometheus/prometheus/discovery/marathon" // register marathon
|
||||
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
|
||||
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
|
||||
_ "github.com/prometheus/prometheus/discovery/zookeeper" // register zookeeper
|
||||
)
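
Each of these blank imports only triggers the package's init-time RegisterConfig call, so a stripped-down tool could, as a sketch, pull in just the mechanisms it needs instead of this package (the selection below is purely illustrative):

package main

import (
	// Register only the discovery mechanisms this binary actually uses.
	_ "github.com/prometheus/prometheus/discovery/file"       // register file
	_ "github.com/prometheus/prometheus/discovery/kubernetes" // register kubernetes
)

func main() {}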
|
|
@ -25,8 +25,9 @@ import (
|
|||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/version"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
disv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
"k8s.io/api/networking/v1beta1"
|
||||
|
@ -38,7 +39,7 @@ import (
|
|||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/prometheus/common/version"
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
|
@ -67,6 +68,19 @@ var (
|
|||
DefaultSDConfig = SDConfig{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
prometheus.MustRegister(eventCount)
|
||||
// Initialize metric vectors.
|
||||
for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} {
|
||||
for _, evt := range []string{"add", "delete", "update"} {
|
||||
eventCount.WithLabelValues(role, evt)
|
||||
}
|
||||
}
|
||||
(&clientGoRequestMetricAdapter{}).Register(prometheus.DefaultRegisterer)
|
||||
(&clientGoWorkqueueMetricsProvider{}).Register(prometheus.DefaultRegisterer)
|
||||
}
|
||||
|
||||
// Role is role of the service in Kubernetes.
|
||||
type Role string
|
||||
|
||||
|
@ -95,11 +109,24 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
|
||||
// SDConfig is the configuration for Kubernetes service discovery.
|
||||
type SDConfig struct {
|
||||
APIServer config_util.URL `yaml:"api_server,omitempty"`
|
||||
Role Role `yaml:"role"`
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"`
|
||||
Selectors []SelectorConfig `yaml:"selectors,omitempty"`
|
||||
APIServer config.URL `yaml:"api_server,omitempty"`
|
||||
Role Role `yaml:"role"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"`
|
||||
Selectors []SelectorConfig `yaml:"selectors,omitempty"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "kubernetes" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return New(opts.Logger, c)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
}
|
||||
|
||||
type roleSelector struct {
|
||||
|
@ -137,7 +164,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config_util.HTTPClientConfig{}) {
|
||||
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.HTTPClientConfig{}) {
|
||||
return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
|
||||
}
|
||||
|
||||
|
@ -197,31 +224,6 @@ func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) er
|
|||
return unmarshal((*plain)(c))
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(eventCount)
|
||||
|
||||
// Initialize metric vectors.
|
||||
for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} {
|
||||
for _, evt := range []string{"add", "delete", "update"} {
|
||||
eventCount.WithLabelValues(role, evt)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
clientGoRequestMetricAdapterInstance = clientGoRequestMetricAdapter{}
|
||||
clientGoWorkqueueMetricsProviderInstance = clientGoWorkqueueMetricsProvider{}
|
||||
)
|
||||
|
||||
clientGoRequestMetricAdapterInstance.Register(prometheus.DefaultRegisterer)
|
||||
clientGoWorkqueueMetricsProviderInstance.Register(prometheus.DefaultRegisterer)
|
||||
|
||||
}
|
||||
|
||||
// This is only for internal use.
|
||||
type discoverer interface {
|
||||
Run(ctx context.Context, up chan<- []*targetgroup.Group)
|
||||
}
|
||||
|
||||
// Discovery implements the discoverer interface for discovering
|
||||
// targets from Kubernetes.
|
||||
type Discovery struct {
|
||||
|
@ -230,7 +232,7 @@ type Discovery struct {
|
|||
role Role
|
||||
logger log.Logger
|
||||
namespaceDiscovery *NamespaceDiscovery
|
||||
discoverers []discoverer
|
||||
discoverers []discovery.Discoverer
|
||||
selectors roleSelector
|
||||
}
|
||||
|
||||
|
@ -260,7 +262,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
|
|||
}
|
||||
level.Info(l).Log("msg", "Using pod service account via in-cluster config")
|
||||
} else {
|
||||
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", false)
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", false, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -281,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
|
|||
logger: l,
|
||||
role: conf.Role,
|
||||
namespaceDiscovery: &conf.NamespaceDiscovery,
|
||||
discoverers: make([]discoverer, 0),
|
||||
discoverers: make([]discovery.Discoverer, 0),
|
||||
selectors: mapSelector(conf.Selectors),
|
||||
}, nil
|
||||
}
|
||||
|
@ -517,7 +519,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
|||
var wg sync.WaitGroup
|
||||
for _, dd := range d.discoverers {
|
||||
wg.Add(1)
|
||||
go func(d discoverer) {
|
||||
go func(d discovery.Discoverer) {
|
||||
defer wg.Done()
|
||||
d.Run(ctx, ch)
|
||||
}(dd)
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
)
|
||||
|
@ -47,7 +48,7 @@ func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime
|
|||
|
||||
type k8sDiscoveryTest struct {
|
||||
// discovery is instance of discovery.Discoverer
|
||||
discovery discoverer
|
||||
discovery discovery.Discoverer
|
||||
// beforeRun runs before discoverer run
|
||||
beforeRun func()
|
||||
// afterStart runs after discoverer has synced
|
||||
|
|
|
@ -24,22 +24,7 @@ import (
|
|||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
sd_config "github.com/prometheus/prometheus/discovery/config"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/azure"
|
||||
"github.com/prometheus/prometheus/discovery/consul"
|
||||
"github.com/prometheus/prometheus/discovery/digitalocean"
|
||||
"github.com/prometheus/prometheus/discovery/dns"
|
||||
"github.com/prometheus/prometheus/discovery/dockerswarm"
|
||||
"github.com/prometheus/prometheus/discovery/ec2"
|
||||
"github.com/prometheus/prometheus/discovery/file"
|
||||
"github.com/prometheus/prometheus/discovery/gce"
|
||||
"github.com/prometheus/prometheus/discovery/kubernetes"
|
||||
"github.com/prometheus/prometheus/discovery/marathon"
|
||||
"github.com/prometheus/prometheus/discovery/openstack"
|
||||
"github.com/prometheus/prometheus/discovery/triton"
|
||||
"github.com/prometheus/prometheus/discovery/zookeeper"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -84,22 +69,6 @@ func init() {
|
|||
prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
|
||||
}
|
||||
|
||||
// Discoverer provides information about target groups. It maintains a set
|
||||
// of sources from which TargetGroups can originate. Whenever a discovery provider
|
||||
// detects a potential change, it sends the TargetGroup through its channel.
|
||||
//
|
||||
// Discoverer does not know if an actual change happened.
|
||||
// It does guarantee that it sends the new TargetGroup whenever a change happens.
|
||||
//
|
||||
// Discoverers should initially send a full set of all discoverable TargetGroups.
|
||||
type Discoverer interface {
|
||||
// Run hands a channel to the discovery provider (Consul, DNS etc) through which it can send
|
||||
// updated target groups.
|
||||
// Must returns if the context gets canceled. It should not close the update
|
||||
// channel on returning.
|
||||
Run(ctx context.Context, up chan<- []*targetgroup.Group)
|
||||
}
|
||||
|
||||
type poolKey struct {
|
||||
setName string
|
||||
provider string
|
||||
|
@ -183,7 +152,7 @@ func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
|
|||
}
|
||||
|
||||
// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
|
||||
func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) error {
|
||||
func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
@ -324,13 +293,12 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
|
|||
}
|
||||
|
||||
// registerProviders returns the number of failed SD configs.
|
||||
func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setName string) int {
|
||||
func (m *Manager) registerProviders(cfgs Configs, setName string) int {
|
||||
var (
|
||||
failedCount int
|
||||
added bool
|
||||
failed int
|
||||
added bool
|
||||
)
|
||||
add := func(cfg interface{}, newDiscoverer func() (Discoverer, error)) {
|
||||
t := reflect.TypeOf(cfg).String()
|
||||
add := func(cfg Config) {
|
||||
for _, p := range m.providers {
|
||||
if reflect.DeepEqual(cfg, p.config) {
|
||||
p.subs = append(p.subs, setName)
|
||||
|
@ -338,98 +306,25 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
|
|||
return
|
||||
}
|
||||
}
|
||||
|
||||
d, err := newDiscoverer()
|
||||
typ := cfg.Name()
|
||||
d, err := cfg.NewDiscoverer(DiscovererOptions{
|
||||
Logger: log.With(m.logger, "discovery", typ),
|
||||
})
|
||||
if err != nil {
|
||||
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", t)
|
||||
failedCount++
|
||||
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
|
||||
failed++
|
||||
return
|
||||
}
|
||||
|
||||
provider := provider{
|
||||
name: fmt.Sprintf("%s/%d", t, len(m.providers)),
|
||||
m.providers = append(m.providers, &provider{
|
||||
name: fmt.Sprintf("%s/%d", typ, len(m.providers)),
|
||||
d: d,
|
||||
config: cfg,
|
||||
subs: []string{setName},
|
||||
}
|
||||
m.providers = append(m.providers, &provider)
|
||||
})
|
||||
added = true
|
||||
}
|
||||
|
||||
for _, c := range cfg.DNSSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return dns.NewDiscovery(*c, log.With(m.logger, "discovery", "dns")), nil
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.FileSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return file.NewDiscovery(c, log.With(m.logger, "discovery", "file")), nil
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.ConsulSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return consul.NewDiscovery(c, log.With(m.logger, "discovery", "consul"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.DigitalOceanSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return digitalocean.NewDiscovery(c, log.With(m.logger, "discovery", "digitalocean"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.DockerSwarmSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return dockerswarm.NewDiscovery(c, log.With(m.logger, "discovery", "dockerswarm"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.MarathonSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return marathon.NewDiscovery(*c, log.With(m.logger, "discovery", "marathon"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.KubernetesSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return kubernetes.New(log.With(m.logger, "discovery", "k8s"), c)
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.ServersetSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return zookeeper.NewServersetDiscovery(c, log.With(m.logger, "discovery", "zookeeper"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.NerveSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return zookeeper.NewNerveDiscovery(c, log.With(m.logger, "discovery", "nerve"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.EC2SDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return ec2.NewDiscovery(c, log.With(m.logger, "discovery", "ec2")), nil
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.OpenstackSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return openstack.NewDiscovery(c, log.With(m.logger, "discovery", "openstack"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.GCESDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return gce.NewDiscovery(*c, log.With(m.logger, "discovery", "gce"))
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.AzureSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return azure.NewDiscovery(c, log.With(m.logger, "discovery", "azure")), nil
|
||||
})
|
||||
}
|
||||
for _, c := range cfg.TritonSDConfigs {
|
||||
add(c, func() (Discoverer, error) {
|
||||
return triton.New(log.With(m.logger, "discovery", "triton"), c)
|
||||
})
|
||||
}
|
||||
if len(cfg.StaticConfigs) > 0 {
|
||||
add(setName, func() (Discoverer, error) {
|
||||
return &StaticProvider{TargetGroups: cfg.StaticConfigs}, nil
|
||||
})
|
||||
for _, cfg := range cfgs {
|
||||
add(cfg)
|
||||
}
|
||||
if !added {
|
||||
// Add an empty target group to force the refresh of the corresponding
|
||||
|
@ -437,11 +332,9 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
|
|||
// current targets.
|
||||
// It can happen because the combined set of SD configurations is empty
|
||||
// or because we fail to instantiate all the SD configurations.
|
||||
add(setName, func() (Discoverer, error) {
|
||||
return &StaticProvider{TargetGroups: []*targetgroup.Group{{}}}, nil
|
||||
})
|
||||
add(StaticConfig{{}})
|
||||
}
|
||||
return failedCount
|
||||
return failed
|
||||
}
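
Seen from the caller's side, the manager now takes one Configs slice per set name and lets each Config build its own Discoverer. A minimal sketch of wiring it up, assuming an illustrative job name and target address:

package main

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
)

func main() {
	ctx := context.Background()
	m := discovery.NewManager(ctx, log.NewNopLogger())
	go m.Run()

	// One Configs slice per scrape/notify job; static targets are just another Config.
	cfgs := map[string]discovery.Configs{
		"prometheus": {
			discovery.StaticConfig{
				{
					Source:  "0",
					Targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}},
				},
			},
		},
	}
	if err := m.ApplyConfig(cfgs); err != nil {
		// handle error
	}
	<-m.SyncCh() // wait for the first target-group sync
}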
|
||||
|
||||
// StaticProvider holds a list of target groups that never change.
|
||||
|
|
|
@ -16,8 +16,6 @@ package discovery
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -26,15 +24,9 @@ import (
|
|||
|
||||
"github.com/go-kit/kit/log"
|
||||
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
common_config "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/config"
|
||||
sd_config "github.com/prometheus/prometheus/discovery/config"
|
||||
"github.com/prometheus/prometheus/discovery/consul"
|
||||
"github.com/prometheus/prometheus/discovery/file"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -725,6 +717,19 @@ func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg fun
|
|||
|
||||
}
|
||||
|
||||
func staticConfig(addrs ...string) StaticConfig {
|
||||
var cfg StaticConfig
|
||||
for i, addr := range addrs {
|
||||
cfg = append(cfg, &targetgroup.Group{
|
||||
Source: fmt.Sprint(i),
|
||||
Targets: []model.LabelSet{
|
||||
{model.AddressLabel: model.LabelValue(addr)},
|
||||
},
|
||||
})
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
|
||||
t.Helper()
|
||||
if _, ok := tSets[poolKey]; !ok {
|
||||
|
@ -760,51 +765,46 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
|
|||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]sd_config.ServiceDiscoveryConfig{
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
{
|
||||
Source: "0",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("foo:9090"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: "1",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("bar:9090"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
staticConfig("foo:9090", "bar:9090"),
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
|
||||
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
{
|
||||
Source: "0",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("foo:9090"),
|
||||
},
|
||||
},
|
||||
},
|
||||
c["prometheus"] = Configs{
|
||||
staticConfig("foo:9090"),
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
|
||||
}
|
||||
|
||||
func TestDiscovererConfigs(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
||||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
staticConfig("foo:9090", "bar:9090"),
|
||||
staticConfig("baz:9090"),
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"bar:9090\"}", false)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true)
|
||||
}
|
||||
|
||||
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
|
||||
|
@ -817,33 +817,24 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
|||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]sd_config.ServiceDiscoveryConfig{
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
{
|
||||
Source: "0",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("foo:9090"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
staticConfig("foo:9090"),
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
|
||||
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{},
|
||||
c["prometheus"] = Configs{
|
||||
StaticConfig{{}},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
|
||||
pkey := poolKey{setName: "prometheus", provider: "string/0"}
|
||||
pkey := poolKey{setName: "prometheus", provider: "static/0"}
|
||||
targetGroups, ok := discoveryManager.targets[pkey]
|
||||
if !ok {
|
||||
t.Fatalf("'%v' should be present in target groups", pkey)
|
||||
|
@ -859,78 +850,36 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "sd")
|
||||
if err != nil {
|
||||
t.Fatalf("error creating temporary file: %v", err)
|
||||
}
|
||||
defer os.Remove(tmpFile.Name())
|
||||
if _, err := tmpFile.Write([]byte(`[{"targets": ["foo:9090"]}]`)); err != nil {
|
||||
t.Fatalf("error writing temporary file: %v", err)
|
||||
}
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
t.Fatalf("error closing temporary file: %v", err)
|
||||
}
|
||||
tmpFile2 := fmt.Sprintf("%s.json", tmpFile.Name())
|
||||
if err = os.Link(tmpFile.Name(), tmpFile2); err != nil {
|
||||
t.Fatalf("error linking temporary file: %v", err)
|
||||
}
|
||||
defer os.Remove(tmpFile2)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
discoveryManager := NewManager(ctx, nil)
|
||||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]sd_config.ServiceDiscoveryConfig{
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
FileSDConfigs: []*file.SDConfig{
|
||||
{
|
||||
Files: []string{
|
||||
tmpFile2,
|
||||
},
|
||||
RefreshInterval: file.DefaultSDConfig.RefreshInterval,
|
||||
},
|
||||
},
|
||||
staticConfig("foo:9090"),
|
||||
},
|
||||
"prometheus2": {
|
||||
FileSDConfigs: []*file.SDConfig{
|
||||
{
|
||||
Files: []string{
|
||||
tmpFile2,
|
||||
},
|
||||
RefreshInterval: file.DefaultSDConfig.RefreshInterval,
|
||||
},
|
||||
},
|
||||
staticConfig("foo:9090"),
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "*file.SDConfig/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "*file.SDConfig/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
if len(discoveryManager.providers) != 1 {
|
||||
t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyConfigDoesNotModifyStaticProviderTargets(t *testing.T) {
|
||||
cfgText := `
|
||||
scrape_configs:
|
||||
- job_name: 'prometheus'
|
||||
static_configs:
|
||||
- targets: ["foo:9090"]
|
||||
- targets: ["bar:9090"]
|
||||
- targets: ["baz:9090"]
|
||||
`
|
||||
originalConfig := &config.Config{}
|
||||
if err := yaml.UnmarshalStrict([]byte(cfgText), originalConfig); err != nil {
|
||||
t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
|
||||
func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
|
||||
originalConfig := Configs{
|
||||
staticConfig("foo:9090", "bar:9090", "baz:9090"),
|
||||
}
|
||||
|
||||
processedConfig := &config.Config{}
|
||||
if err := yaml.UnmarshalStrict([]byte(cfgText), processedConfig); err != nil {
|
||||
t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
|
||||
processedConfig := Configs{
|
||||
staticConfig("foo:9090", "bar:9090", "baz:9090"),
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
@ -938,21 +887,25 @@ scrape_configs:
|
|||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]sd_config.ServiceDiscoveryConfig{
|
||||
"prometheus": processedConfig.ScrapeConfigs[0].ServiceDiscoveryConfig,
|
||||
cfgs := map[string]Configs{
|
||||
"prometheus": processedConfig,
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
discoveryManager.ApplyConfig(cfgs)
|
||||
<-discoveryManager.SyncCh()
|
||||
|
||||
origSdcfg := originalConfig.ScrapeConfigs[0].ServiceDiscoveryConfig
|
||||
for _, sdcfg := range c {
|
||||
if !reflect.DeepEqual(origSdcfg.StaticConfigs, sdcfg.StaticConfigs) {
|
||||
for _, cfg := range cfgs {
|
||||
if !reflect.DeepEqual(originalConfig, cfg) {
|
||||
t.Fatalf("discovery manager modified static config \n expected: %v\n got: %v\n",
|
||||
origSdcfg.StaticConfigs, sdcfg.StaticConfigs)
|
||||
originalConfig, cfg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type errorConfig struct{ err error }
|
||||
|
||||
func (e errorConfig) Name() string { return "error" }
|
||||
func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err }
|
||||
|
||||
func TestGaugeFailedConfigs(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
@ -960,28 +913,11 @@ func TestGaugeFailedConfigs(t *testing.T) {
|
|||
discoveryManager.updatert = 100 * time.Millisecond
|
||||
go discoveryManager.Run()
|
||||
|
||||
c := map[string]sd_config.ServiceDiscoveryConfig{
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
ConsulSDConfigs: []*consul.SDConfig{
|
||||
{
|
||||
Server: "foo:8500",
|
||||
TLSConfig: common_config.TLSConfig{
|
||||
CertFile: "/tmp/non_existent",
|
||||
},
|
||||
},
|
||||
{
|
||||
Server: "bar:8500",
|
||||
TLSConfig: common_config.TLSConfig{
|
||||
CertFile: "/tmp/non_existent",
|
||||
},
|
||||
},
|
||||
{
|
||||
Server: "foo2:8500",
|
||||
TLSConfig: common_config.TLSConfig{
|
||||
CertFile: "/tmp/non_existent",
|
||||
},
|
||||
},
|
||||
},
|
||||
errorConfig{fmt.Errorf("tests error 0")},
|
||||
errorConfig{fmt.Errorf("tests error 1")},
|
||||
errorConfig{fmt.Errorf("tests error 2")},
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
@ -992,17 +928,8 @@ func TestGaugeFailedConfigs(t *testing.T) {
|
|||
t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
|
||||
}
|
||||
|
||||
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
|
||||
StaticConfigs: []*targetgroup.Group{
|
||||
{
|
||||
Source: "0",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: "foo:9090",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
c["prometheus"] = Configs{
|
||||
staticConfig("foo:9090"),
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
<-discoveryManager.SyncCh()
|
||||
|
|
|
@ -28,9 +28,10 @@ import (
|
|||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/pkg/errors"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
|
@ -63,13 +64,31 @@ var DefaultSDConfig = SDConfig{
|
|||
RefreshInterval: model.Duration(30 * time.Second),
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for services running on Marathon.
|
||||
type SDConfig struct {
|
||||
Servers []string `yaml:"servers,omitempty"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
AuthToken config_util.Secret `yaml:"auth_token,omitempty"`
|
||||
AuthTokenFile string `yaml:"auth_token_file,omitempty"`
|
||||
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
|
||||
Servers []string `yaml:"servers,omitempty"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
AuthToken config.Secret `yaml:"auth_token,omitempty"`
|
||||
AuthTokenFile string `yaml:"auth_token_file,omitempty"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "marathon" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(*c, opts.Logger)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
func (c *SDConfig) SetDirectory(dir string) {
|
||||
c.HTTPClientConfig.SetDirectory(dir)
|
||||
c.AuthTokenFile = config.JoinDir(dir, c.AuthTokenFile)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
|
@ -108,7 +127,7 @@ type Discovery struct {
|
|||
|
||||
// NewDiscovery returns a new Marathon Discovery.
|
||||
func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
|
||||
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", false)
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", false, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -137,12 +156,12 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
|
|||
}
|
||||
|
||||
type authTokenRoundTripper struct {
|
||||
authToken config_util.Secret
|
||||
authToken config.Secret
|
||||
rt http.RoundTripper
|
||||
}
|
||||
|
||||
// newAuthTokenRoundTripper adds the provided auth token to a request.
|
||||
func newAuthTokenRoundTripper(token config_util.Secret, rt http.RoundTripper) (http.RoundTripper, error) {
|
||||
func newAuthTokenRoundTripper(token config.Secret, rt http.RoundTripper) (http.RoundTripper, error) {
|
||||
return &authTokenRoundTripper{token, rt}, nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -24,9 +24,10 @@ import (
    "github.com/gophercloud/gophercloud/openstack"
    conntrack "github.com/mwitkow/go-conntrack"
    "github.com/pkg/errors"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/refresh"
    "github.com/prometheus/prometheus/discovery/targetgroup"
)

@@ -38,26 +39,43 @@ var DefaultSDConfig = SDConfig{
    Availability: "public",
}

func init() {
    discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for OpenStack based service discovery.
type SDConfig struct {
    IdentityEndpoint string `yaml:"identity_endpoint"`
    Username string `yaml:"username"`
    UserID string `yaml:"userid"`
    Password config_util.Secret `yaml:"password"`
    ProjectName string `yaml:"project_name"`
    ProjectID string `yaml:"project_id"`
    DomainName string `yaml:"domain_name"`
    DomainID string `yaml:"domain_id"`
    ApplicationCredentialName string `yaml:"application_credential_name"`
    ApplicationCredentialID string `yaml:"application_credential_id"`
    ApplicationCredentialSecret config_util.Secret `yaml:"application_credential_secret"`
    Role Role `yaml:"role"`
    Region string `yaml:"region"`
    RefreshInterval model.Duration `yaml:"refresh_interval"`
    Port int `yaml:"port"`
    AllTenants bool `yaml:"all_tenants,omitempty"`
    TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
    Availability string `yaml:"availability,omitempty"`
    IdentityEndpoint string `yaml:"identity_endpoint"`
    Username string `yaml:"username"`
    UserID string `yaml:"userid"`
    Password config.Secret `yaml:"password"`
    ProjectName string `yaml:"project_name"`
    ProjectID string `yaml:"project_id"`
    DomainName string `yaml:"domain_name"`
    DomainID string `yaml:"domain_id"`
    ApplicationCredentialName string `yaml:"application_credential_name"`
    ApplicationCredentialID string `yaml:"application_credential_id"`
    ApplicationCredentialSecret config.Secret `yaml:"application_credential_secret"`
    Role Role `yaml:"role"`
    Region string `yaml:"region"`
    RefreshInterval model.Duration `yaml:"refresh_interval"`
    Port int `yaml:"port"`
    AllTenants bool `yaml:"all_tenants,omitempty"`
    TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
    Availability string `yaml:"availability,omitempty"`
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "openstack" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return NewDiscovery(c, opts.Logger)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
    c.TLSConfig.SetDirectory(dir)
}

// Role is the role of the target in OpenStack.

@@ -157,7 +175,7 @@ func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
    if err != nil {
        return nil, err
    }
    tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
    tls, err := config.NewTLSConfig(&conf.TLSConfig)
    if err != nil {
        return nil, err
    }
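The init-time RegisterConfig call above is the new extension point: each SD package now registers its own Config type instead of being hard-wired into a central config struct. A rough sketch of that pattern with a hypothetical "example" mechanism; only discovery.Config, DiscovererOptions, Discoverer, and RegisterConfig are taken from this change.

package example

import (
    "context"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/targetgroup"
)

func init() {
    // Makes an `example_sd_configs` list key available in scrape and alerting configs.
    discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is a hypothetical SD configuration.
type SDConfig struct {
    Server string `yaml:"server"`
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "example" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return &Discovery{conf: c}, nil
}

// Discovery is a hypothetical no-op Discoverer.
type Discovery struct {
    conf *SDConfig
}

// Run implements discovery.Discoverer and blocks until the context is canceled.
func (d *Discovery) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
    <-ctx.Done()
}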
discovery/registry.go (new file, 257 lines)

@@ -0,0 +1,257 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package discovery

import (
    "fmt"
    "reflect"
    "sort"
    "strconv"
    "strings"
    "sync"

    "github.com/prometheus/prometheus/discovery/targetgroup"
    "gopkg.in/yaml.v2"
)

const (
    configFieldPrefix      = "AUTO_DISCOVERY_"
    staticConfigsKey       = "static_configs"
    staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)

var (
    configNames      = make(map[string]Config)
    configFieldNames = make(map[reflect.Type]string)
    configFields     []reflect.StructField

    configTypesMu sync.Mutex
    configTypes   = make(map[reflect.Type]reflect.Type)

    emptyStructType = reflect.TypeOf(struct{}{})
    configsType     = reflect.TypeOf(Configs{})
)

// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config Config) {
    registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}

func init() {
    // N.B.: static_configs is the only Config type implemented by default.
    // All other types are registered at init by their implementing packages.
    elemTyp := reflect.TypeOf(&targetgroup.Group{})
    registerConfig(staticConfigsKey, elemTyp, StaticConfig{})
}

func registerConfig(yamlKey string, elemType reflect.Type, config Config) {
    name := config.Name()
    if _, ok := configNames[name]; ok {
        panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
    }
    configNames[name] = config

    fieldName := configFieldPrefix + yamlKey // Field must be exported.
    configFieldNames[elemType] = fieldName

    // Insert fields in sorted order.
    i := sort.Search(len(configFields), func(k int) bool {
        return fieldName < configFields[k].Name
    })
    configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
    copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
    configFields[i] = reflect.StructField{                     // Write new field in place.
        Name: fieldName,
        Type: reflect.SliceOf(elemType),
        Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
    }
}

func getConfigType(out reflect.Type) reflect.Type {
    configTypesMu.Lock()
    defer configTypesMu.Unlock()
    if typ, ok := configTypes[out]; ok {
        return typ
    }
    // Initial exported fields map one-to-one.
    var fields []reflect.StructField
    for i, n := 0, out.NumField(); i < n; i++ {
        switch field := out.Field(i); {
        case field.PkgPath == "" && field.Type != configsType:
            fields = append(fields, field)
        default:
            fields = append(fields, reflect.StructField{
                Name:    "_" + field.Name, // Field must be unexported.
                PkgPath: out.PkgPath(),
                Type:    emptyStructType,
            })
        }
    }
    // Append extra config fields on the end.
    fields = append(fields, configFields...)
    typ := reflect.StructOf(fields)
    configTypes[out] = typ
    return typ
}

// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
    outVal := reflect.ValueOf(out)
    if outVal.Kind() != reflect.Ptr {
        return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
    }
    outVal = outVal.Elem()
    if outVal.Kind() != reflect.Struct {
        return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
    }
    outTyp := outVal.Type()

    cfgTyp := getConfigType(outTyp)
    cfgPtr := reflect.New(cfgTyp)
    cfgVal := cfgPtr.Elem()

    // Copy shared fields (defaults) to dynamic value.
    var configs *Configs
    for i, n := 0, outVal.NumField(); i < n; i++ {
        if outTyp.Field(i).Type == configsType {
            configs = outVal.Field(i).Addr().Interface().(*Configs)
            continue
        }
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        cfgVal.Field(i).Set(outVal.Field(i))
    }
    if configs == nil {
        return fmt.Errorf("discovery: Configs field not found in type: %T", out)
    }

    // Unmarshal into dynamic value.
    if err := unmarshal(cfgPtr.Interface()); err != nil {
        return replaceYAMLTypeError(err, cfgTyp, outTyp)
    }

    // Copy shared fields from dynamic value.
    for i, n := 0, outVal.NumField(); i < n; i++ {
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        outVal.Field(i).Set(cfgVal.Field(i))
    }

    var err error
    *configs, err = readConfigs(cfgVal, outVal.NumField())
    return err
}

func readConfigs(structVal reflect.Value, startField int) (Configs, error) {
    var (
        configs Configs
        targets []*targetgroup.Group
    )
    for i, n := startField, structVal.NumField(); i < n; i++ {
        field := structVal.Field(i)
        if field.Kind() != reflect.Slice {
            panic("discovery: internal error: field is not a slice")
        }
        for k := 0; k < field.Len(); k++ {
            val := field.Index(k)
            if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
                key := configFieldNames[field.Type().Elem()]
                key = strings.TrimPrefix(key, configFieldPrefix)
                return nil, fmt.Errorf("empty or null section in %s", key)
            }
            switch c := val.Interface().(type) {
            case *targetgroup.Group:
                // Add index to the static config target groups for unique identification
                // within scrape pool.
                c.Source = strconv.Itoa(len(targets))
                // Coalesce multiple static configs into a single static config.
                targets = append(targets, c)
            case Config:
                configs = append(configs, c)
            default:
                panic("discovery: internal error: slice element is not a Config")
            }
        }
    }
    if len(targets) > 0 {
        configs = append(configs, StaticConfig(targets))
    }
    return configs, nil
}

// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
    inVal := reflect.ValueOf(in)
    for inVal.Kind() == reflect.Ptr {
        inVal = inVal.Elem()
    }
    inTyp := inVal.Type()

    cfgTyp := getConfigType(inTyp)
    cfgPtr := reflect.New(cfgTyp)
    cfgVal := cfgPtr.Elem()

    // Copy shared fields to dynamic value.
    var configs *Configs
    for i, n := 0, inTyp.NumField(); i < n; i++ {
        if inTyp.Field(i).Type == configsType {
            configs = inVal.Field(i).Addr().Interface().(*Configs)
        }
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        cfgVal.Field(i).Set(inVal.Field(i))
    }
    if configs == nil {
        return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
    }

    if err := writeConfigs(cfgVal, *configs); err != nil {
        return nil, err
    }

    return cfgPtr.Interface(), nil
}

func writeConfigs(structVal reflect.Value, configs Configs) error {
    targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
    for _, c := range configs {
        if sc, ok := c.(StaticConfig); ok {
            *targets = append(*targets, sc...)
            continue
        }
        fieldName, ok := configFieldNames[reflect.TypeOf(c)]
        if !ok {
            return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
        }
        field := structVal.FieldByName(fieldName)
        field.Set(reflect.Append(field, reflect.ValueOf(c)))
    }
    return nil
}

func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
    if e, ok := err.(*yaml.TypeError); ok {
        oldStr := oldTyp.String()
        newStr := newTyp.String()
        for i, s := range e.Errors {
            e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
        }
    }
    return err
}
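registry.go builds a dynamic struct type whose extra fields are the registered *_sd_configs keys, so any section that carries an inlined discovery.Configs field can delegate its YAML handling to the two helpers above. A sketch of how such a section might wire them up; the JobConfig wrapper and the YAML snippet are hypothetical, only the helper functions are from this file.

package main

import (
    "fmt"

    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/discovery"
)

// JobConfig is a hypothetical section that surfaces all *_sd_configs keys inline.
type JobConfig struct {
    JobName                 string            `yaml:"job_name"`
    ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
}

// UnmarshalYAML implements yaml.Unmarshaler via the dynamic registry type.
func (c *JobConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    return discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal)
}

// MarshalYAML implements yaml.Marshaler via the dynamic registry type.
func (c *JobConfig) MarshalYAML() (interface{}, error) {
    return discovery.MarshalYAMLWithInlineConfigs(c)
}

func main() {
    // static_configs is registered by the discovery package itself.
    data := []byte(`
job_name: demo
static_configs:
  - targets: ['localhost:9090']
`)
    var cfg JobConfig
    if err := yaml.Unmarshal(data, &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%s: %d discovery config(s)\n", cfg.JobName, len(cfg.ServiceDiscoveryConfigs))
}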
@@ -27,9 +27,10 @@ import (
    "github.com/go-kit/kit/log"
    conntrack "github.com/mwitkow/go-conntrack"
    "github.com/pkg/errors"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/refresh"
    "github.com/prometheus/prometheus/discovery/targetgroup"
)

@@ -52,17 +53,34 @@ var DefaultSDConfig = SDConfig{
    Version: 1,
}

func init() {
    discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for Triton based service discovery.
type SDConfig struct {
    Account string `yaml:"account"`
    Role string `yaml:"role"`
    DNSSuffix string `yaml:"dns_suffix"`
    Endpoint string `yaml:"endpoint"`
    Groups []string `yaml:"groups,omitempty"`
    Port int `yaml:"port"`
    RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
    TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
    Version int `yaml:"version"`
    Account string `yaml:"account"`
    Role string `yaml:"role"`
    DNSSuffix string `yaml:"dns_suffix"`
    Endpoint string `yaml:"endpoint"`
    Groups []string `yaml:"groups,omitempty"`
    Port int `yaml:"port"`
    RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
    TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
    Version int `yaml:"version"`
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "triton" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return New(opts.Logger, c)
}

// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
    c.TLSConfig.SetDirectory(dir)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -122,7 +140,7 @@ type Discovery struct {

// New returns a new Discovery which periodically refreshes its targets.
func New(logger log.Logger, conf *SDConfig) (*Discovery, error) {
    tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
    tls, err := config.NewTLSConfig(&conf.TLSConfig)
    if err != nil {
        return nil, err
    }
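With a config registered, a caller can turn it into a Discoverer purely through the interface, without knowing the concrete package. A sketch of that hand-off for the Triton config above, using hypothetical endpoint values; DiscovererOptions is assumed here to carry only the logger, as in the diff.

package main

import (
    "fmt"
    "time"

    "github.com/go-kit/kit/log"
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/triton"
)

func main() {
    cfg := &triton.SDConfig{
        Account:         "acct",                // hypothetical account
        Role:            "container",
        DNSSuffix:       "triton.example.org",  // hypothetical DNS suffix
        Endpoint:        "cmon.example.org",    // hypothetical CMON endpoint
        Port:            9163,
        RefreshInterval: model.Duration(60 * time.Second),
        Version:         1,
    }
    // The discovery manager would make this call for every configured SD section.
    d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: log.NewNopLogger()})
    if err != nil {
        panic(err)
    }
    fmt.Printf("created %T for %q targets\n", d, cfg.Role)
}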
@@ -27,6 +27,7 @@ import (
    "github.com/prometheus/common/model"
    "github.com/samuel/go-zookeeper/zk"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/targetgroup"
    "github.com/prometheus/prometheus/util/strutil"
    "github.com/prometheus/prometheus/util/treecache"

@@ -43,6 +44,11 @@ var (
    }
)

func init() {
    discovery.RegisterConfig(&ServersetSDConfig{})
    discovery.RegisterConfig(&NerveSDConfig{})
}

// ServersetSDConfig is the configuration for Twitter serversets in Zookeeper based discovery.
type ServersetSDConfig struct {
    Servers []string `yaml:"servers"`

@@ -50,6 +56,14 @@ type ServersetSDConfig struct {
    Timeout model.Duration `yaml:"timeout,omitempty"`
}

// Name returns the name of the Config.
func (*ServersetSDConfig) Name() string { return "serverset" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return NewServersetDiscovery(c, opts.Logger)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    *c = DefaultServersetSDConfig

@@ -79,6 +93,14 @@ type NerveSDConfig struct {
    Timeout model.Duration `yaml:"timeout,omitempty"`
}

// Name returns the name of the Config.
func (*NerveSDConfig) Name() string { return "nerve" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return NewNerveDiscovery(c, opts.Logger)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    *c = DefaultNerveSDConfig
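Because the registry derives each YAML list key from Config.Name() plus the "_sd_configs" suffix, registering both Zookeeper-backed configs above makes serverset_sd_configs and nerve_sd_configs available without any central wiring. A trivial sketch of that derivation:

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/discovery/zookeeper"
)

func main() {
    // The registry builds each YAML key as Name() + "_sd_configs".
    for _, c := range []interface{ Name() string }{
        &zookeeper.ServersetSDConfig{},
        &zookeeper.NerveSDConfig{},
    } {
        fmt.Println(c.Name() + "_sd_configs") // serverset_sd_configs, nerve_sd_configs
    }
}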
go.mod (2 lines changed)

@@ -53,7 +53,7 @@ require (
    github.com/prometheus/alertmanager v0.21.0
    github.com/prometheus/client_golang v1.7.1
    github.com/prometheus/client_model v0.2.0
    github.com/prometheus/common v0.11.1
    github.com/prometheus/common v0.13.0
    github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
    github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
    github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
go.sum (4 lines changed)

@@ -656,8 +656,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.11.1 h1:0ZISXCMRuCZcxF77aT1BXY5m74mX2vrGYl1dSwBI0Jo=
github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.13.0 h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg=
github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -634,7 +634,7 @@ type alertmanagerSet struct {
}

func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", false)
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", false, false)
    if err != nil {
        return nil, err
    }

@@ -146,7 +146,7 @@ func TestHandlerSendAll(t *testing.T) {
            Username: "prometheus",
            Password: "testing_password",
        },
    }, "auth_alertmanager", false)
    }, "auth_alertmanager", false, false)

    h.alertmanagers = make(map[string]*alertmanagerSet)
@@ -219,7 +219,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
        logger = log.NewNopLogger()
    }

    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, false)
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, false, false)
    if err != nil {
        targetScrapePoolsFailed.Inc()
        return nil, errors.Wrap(err, "error creating HTTP client")

@@ -321,7 +321,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
    sp.mtx.Lock()
    defer sp.mtx.Unlock()

    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, false)
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, false, false)
    if err != nil {
        targetScrapePoolReloadsFailed.Inc()
        return errors.Wrap(err, "error creating HTTP client")
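All of these call sites pick up the extra boolean added to NewClientFromConfig in the vendored prometheus/common update further down: the first flag still disables keep-alives, the new second one opts into HTTP/2 and is kept false throughout this change. A sketch of the updated call with a hypothetical endpoint:

package main

import (
    "fmt"

    config_util "github.com/prometheus/common/config"
)

func main() {
    cfg := config_util.HTTPClientConfig{}
    // disableKeepAlives=false, enableHTTP2=false, matching the call sites above.
    client, err := config_util.NewClientFromConfig(cfg, "example_client", false, false)
    if err != nil {
        panic(err)
    }
    resp, err := client.Get("http://localhost:9090/-/healthy") // hypothetical target
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}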
@@ -151,7 +151,7 @@ func TestNewHTTPBearerToken(t *testing.T) {
    cfg := config_util.HTTPClientConfig{
        BearerToken: "1234",
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -178,7 +178,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
    cfg := config_util.HTTPClientConfig{
        BearerTokenFile: "testdata/bearertoken.txt",
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -207,7 +207,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
            Password: "password123",
        },
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -235,7 +235,7 @@ func TestNewHTTPCACert(t *testing.T) {
            CAFile: caCertPath,
        },
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -268,7 +268,7 @@ func TestNewHTTPClientCert(t *testing.T) {
            KeyFile: "testdata/client.key",
        },
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -297,7 +297,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
            ServerName: "prometheus.rocks",
        },
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -326,7 +326,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
            ServerName: "badname",
        },
    }
    c, err := config_util.NewClientFromConfig(cfg, "test", false)
    c, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err != nil {
        t.Fatal(err)
    }

@@ -364,7 +364,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) {
            KeyFile: "testdata/nonexistent_client.key",
        },
    }
    _, err := config_util.NewClientFromConfig(cfg, "test", false)
    _, err := config_util.NewClientFromConfig(cfg, "test", false, false)
    if err == nil {
        t.Fatalf("Expected error, got nil.")
    }
@@ -104,7 +104,7 @@ type ReadClient interface {

// newReadClient creates a new client for remote read.
func newReadClient(name string, conf *ClientConfig) (ReadClient, error) {
    httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", false)
    httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", false, false)
    if err != nil {
        return nil, err
    }

@@ -122,7 +122,7 @@ func newReadClient(name string, conf *ClientConfig) (ReadClient, error) {

// NewWriteClient creates a new client for remote write.
func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
    httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", false)
    httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", false, false)
    if err != nil {
        return nil, err
    }
vendor/github.com/prometheus/common/config/config.go (generated, vendored, 19 lines changed)

@@ -16,6 +16,8 @@

package config

import "path/filepath"

// Secret special type for storing secrets.
type Secret string

@@ -32,3 +34,20 @@ func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
    type plain Secret
    return unmarshal((*plain)(s))
}

// DirectorySetter is a config type that contains file paths that may
// be relative to the file containing the config.
type DirectorySetter interface {
    // SetDirectory joins any relative file paths with dir.
    // Any paths that are empty or absolute remain unchanged.
    SetDirectory(dir string)
}

// JoinDir joins dir and path if path is relative.
// If path is empty or absolute, it is returned unchanged.
func JoinDir(dir, path string) string {
    if path == "" || filepath.IsAbs(path) {
        return path
    }
    return filepath.Join(dir, path)
}
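JoinDir is the primitive every SetDirectory implementation above leans on. A quick sketch of its three cases; the paths are hypothetical:

package main

import (
    "fmt"

    "github.com/prometheus/common/config"
)

func main() {
    dir := "/etc/prometheus"
    for _, p := range []string{"tokens/bearer.txt", "/var/run/secrets/token", ""} {
        fmt.Printf("%q -> %q\n", p, config.JoinDir(dir, p))
    }
    // "tokens/bearer.txt"      -> "/etc/prometheus/tokens/bearer.txt"  (relative: joined)
    // "/var/run/secrets/token" -> "/var/run/secrets/token"             (absolute: unchanged)
    // ""                       -> ""                                   (empty: unchanged)
}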
vendor/github.com/prometheus/common/config/http_config.go (generated, vendored, 50 lines changed)

@@ -44,6 +44,14 @@ type BasicAuth struct {
    PasswordFile string `yaml:"password_file,omitempty"`
}

// SetDirectory joins any relative file paths with dir.
func (a *BasicAuth) SetDirectory(dir string) {
    if a == nil {
        return
    }
    a.PasswordFile = JoinDir(dir, a.PasswordFile)
}

// URL is a custom URL type that allows validation at configuration load time.
type URL struct {
    *url.URL

@@ -86,6 +94,16 @@ type HTTPClientConfig struct {
    TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
}

// SetDirectory joins any relative file paths with dir.
func (c *HTTPClientConfig) SetDirectory(dir string) {
    if c == nil {
        return
    }
    c.TLSConfig.SetDirectory(dir)
    c.BasicAuth.SetDirectory(dir)
    c.BearerTokenFile = JoinDir(dir, c.BearerTokenFile)
}

// Validate validates the HTTPClientConfig to check only one of BearerToken,
// BasicAuth and BearerTokenFile is configured.
func (c *HTTPClientConfig) Validate() error {

@@ -123,8 +141,8 @@ func newClient(rt http.RoundTripper) *http.Client {

// NewClientFromConfig returns a new HTTP client configured for the
// given config.HTTPClientConfig. The name is used as go-conntrack metric label.
func NewClientFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives bool) (*http.Client, error) {
    rt, err := NewRoundTripperFromConfig(cfg, name, disableKeepAlives)
func NewClientFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives, enableHTTP2 bool) (*http.Client, error) {
    rt, err := NewRoundTripperFromConfig(cfg, name, disableKeepAlives, enableHTTP2)
    if err != nil {
        return nil, err
    }

@@ -133,7 +151,7 @@ func NewClientFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives bo

// NewRoundTripperFromConfig returns a new HTTP RoundTripper configured for the
// given config.HTTPClientConfig. The name is used as go-conntrack metric label.
func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives bool) (http.RoundTripper, error) {
func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, disableKeepAlives, enableHTTP2 bool) (http.RoundTripper, error) {
    newRT := func(tlsConfig *tls.Config) (http.RoundTripper, error) {
        // The only timeout we care about is the configured scrape timeout.
        // It is applied on request. So we leave out any timings here.
|
|||
conntrack.DialWithName(name),
|
||||
),
|
||||
}
|
||||
// TODO: use ForceAttemptHTTP2 when we move to Go 1.13+.
|
||||
err := http2.ConfigureTransport(rt.(*http.Transport))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if enableHTTP2 {
|
||||
// HTTP/2 support is golang has many problematic cornercases where
|
||||
// dead connections would be kept and used in connection pools.
|
||||
// https://github.com/golang/go/issues/32388
|
||||
// https://github.com/golang/go/issues/39337
|
||||
// https://github.com/golang/go/issues/39750
|
||||
// TODO: Re-Enable HTTP/2 once upstream issue is fixed.
|
||||
// TODO: use ForceAttemptHTTP2 when we move to Go 1.13+.
|
||||
err := http2.ConfigureTransport(rt.(*http.Transport))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// If a bearer token is provided, create a round tripper that will set the
|
||||
|
@@ -344,6 +370,16 @@ type TLSConfig struct {
    InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
}

// SetDirectory joins any relative file paths with dir.
func (c *TLSConfig) SetDirectory(dir string) {
    if c == nil {
        return
    }
    c.CAFile = JoinDir(dir, c.CAFile)
    c.CertFile = JoinDir(dir, c.CertFile)
    c.KeyFile = JoinDir(dir, c.KeyFile)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    type plain TLSConfig
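Taken together, the SetDirectory methods cascade from HTTPClientConfig down to its TLS and basic-auth sub-configs, so one call resolves every relative path in a client config. A sketch with hypothetical file names:

package main

import (
    "fmt"

    "github.com/prometheus/common/config"
)

func main() {
    cfg := config.HTTPClientConfig{
        BearerTokenFile: "bearer.txt",                     // hypothetical relative paths
        TLSConfig:       config.TLSConfig{CAFile: "ca.crt"},
    }
    // One call joins every relative path with the config file's directory.
    cfg.SetDirectory("/etc/prometheus")
    fmt.Println(cfg.BearerTokenFile)  // /etc/prometheus/bearer.txt
    fmt.Println(cfg.TLSConfig.CAFile) // /etc/prometheus/ca.crt
}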
vendor/modules.txt (vendored, 2 lines changed)

@@ -385,7 +385,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
# github.com/prometheus/client_model v0.2.0
## explicit
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.11.1
# github.com/prometheus/common v0.13.0
## explicit
github.com/prometheus/common/config
github.com/prometheus/common/expfmt