diff --git a/config/config_test.go b/config/config_test.go
index 6150773e7..4d8018c07 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -806,7 +806,7 @@ var expectedErrors = []struct {
 		errMsg:   "role",
 	}, {
 		filename: "kubernetes_selectors_endpoints.bad.yml",
-		errMsg:   "endpoints role supports only pod, service and endpoints selectors",
+		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
 	}, {
 		filename: "kubernetes_selectors_ingress.bad.yml",
 		errMsg:   "ingress role supports only ingress selectors",
@@ -822,6 +822,12 @@ var expectedErrors = []struct {
 	}, {
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
+	}, {
+		filename: "kubernetes_selectors_duplicated_role.bad.yml",
+		errMsg:   "duplicated selector role: pod",
+	}, {
+		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
+		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
 	}, {
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
diff --git a/config/testdata/kubernetes_selectors_duplicated_role.bad.yml b/config/testdata/kubernetes_selectors_duplicated_role.bad.yml
new file mode 100644
index 000000000..dc3dec25c
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_duplicated_role.bad.yml
@@ -0,0 +1,11 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_endpoints.bad.yml b/config/testdata/kubernetes_selectors_endpoints.bad.yml
index afd47e8f5..d2128f374 100644
--- a/config/testdata/kubernetes_selectors_endpoints.bad.yml
+++ b/config/testdata/kubernetes_selectors_endpoints.bad.yml
@@ -3,12 +3,12 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: endpoints
     selectors:
-      node:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      service:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      endpoints:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "service"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "endpoints"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_endpoints.good.yml b/config/testdata/kubernetes_selectors_endpoints.good.yml
index 25ecddfcd..d0b22ff93 100644
--- a/config/testdata/kubernetes_selectors_endpoints.good.yml
+++ b/config/testdata/kubernetes_selectors_endpoints.good.yml
@@ -3,12 +3,12 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: endpoints
     selectors:
-      pod:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      service:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      endpoints:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "service"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "endpoints"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml b/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml
new file mode 100644
index 000000000..72d2d90e6
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml
@@ -0,0 +1,7 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+    - role: "pod"
+      field: "metadata.status-Running"
diff --git a/config/testdata/kubernetes_selectors_ingress.bad.yml b/config/testdata/kubernetes_selectors_ingress.bad.yml
index bcf815069..7698de35a 100644
--- a/config/testdata/kubernetes_selectors_ingress.bad.yml
+++ b/config/testdata/kubernetes_selectors_ingress.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: ingress
     selectors:
-      node:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_ingress.good.yml b/config/testdata/kubernetes_selectors_ingress.good.yml
index b9955c522..6aa1edf20 100644
--- a/config/testdata/kubernetes_selectors_ingress.good.yml
+++ b/config/testdata/kubernetes_selectors_ingress.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: ingress
     selectors:
-      ingress:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "ingress"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_node.bad.yml b/config/testdata/kubernetes_selectors_node.bad.yml
index 0636e8f26..969435151 100644
--- a/config/testdata/kubernetes_selectors_node.bad.yml
+++ b/config/testdata/kubernetes_selectors_node.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: node
     selectors:
-      pod:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_node.good.yml b/config/testdata/kubernetes_selectors_node.good.yml
index 0ae1f16aa..d355c62ab 100644
--- a/config/testdata/kubernetes_selectors_node.good.yml
+++ b/config/testdata/kubernetes_selectors_node.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: node
     selectors:
-      node:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_pod.bad.yml b/config/testdata/kubernetes_selectors_pod.bad.yml
index afdfbfdea..daa7290a4 100644
--- a/config/testdata/kubernetes_selectors_pod.bad.yml
+++ b/config/testdata/kubernetes_selectors_pod.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: pod
     selectors:
-      node:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_pod.good.yml b/config/testdata/kubernetes_selectors_pod.good.yml
index f27396511..e5ab315fd 100644
--- a/config/testdata/kubernetes_selectors_pod.good.yml
+++ b/config/testdata/kubernetes_selectors_pod.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: pod
     selectors:
-      pod:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_service.bad.yml b/config/testdata/kubernetes_selectors_service.bad.yml
index 0ed850162..78179be69 100644
--- a/config/testdata/kubernetes_selectors_service.bad.yml
+++ b/config/testdata/kubernetes_selectors_service.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: service
     selectors:
-      pod:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_service.good.yml b/config/testdata/kubernetes_selectors_service.good.yml
index 500834c81..9c7705dc6 100644
--- a/config/testdata/kubernetes_selectors_service.good.yml
+++ b/config/testdata/kubernetes_selectors_service.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: service
     selectors:
-      service:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "service"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go
index 04b36a4f5..abfdcc3d4 100644
--- a/discovery/kubernetes/kubernetes.go
+++ b/discovery/kubernetes/kubernetes.go
@@ -16,6 +16,7 @@ package kubernetes
 import (
 	"context"
 	"reflect"
+	"strings"
 	"sync"
 	"time"
 
@@ -28,6 +29,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
@@ -91,22 +93,28 @@ type SDConfig struct {
 	Role               Role                         `yaml:"role"`
 	HTTPClientConfig   config_util.HTTPClientConfig `yaml:",inline"`
 	NamespaceDiscovery NamespaceDiscovery           `yaml:"namespaces,omitempty"`
-	Selectors          RoleSelectorConfig           `yaml:"selectors,omitempty"`
+	Selectors          []SelectorConfig             `yaml:"selectors,omitempty"`
 }
 
-type RoleSelectorConfig struct {
-	Node      ResourceSelectorConfig `yaml:"node,omitempty"`
-	Pod       ResourceSelectorConfig `yaml:"pod,omitempty"`
-	Service   ResourceSelectorConfig `yaml:"service,omitempty"`
-	Endpoints ResourceSelectorConfig `yaml:"endpoints,omitempty"`
-	Ingress   ResourceSelectorConfig `yaml:"ingress,omitempty"`
+type roleSelector struct {
+	node      resourceSelector
+	pod       resourceSelector
+	service   resourceSelector
+	endpoints resourceSelector
+	ingress   resourceSelector
 }
 
-type ResourceSelectorConfig struct {
+type SelectorConfig struct {
+	Role  Role   `yaml:"role,omitempty"`
 	Label string `yaml:"label,omitempty"`
 	Field string `yaml:"field,omitempty"`
 }
 
+type resourceSelector struct {
+	label string
+	field string
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	*c = SDConfig{}
@@ -115,6 +123,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
+	if c.Role == "" {
+		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
+	}
 	err = c.HTTPClientConfig.Validate()
 	if err != nil {
 		return err
@@ -122,29 +133,45 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config_util.HTTPClientConfig{}) {
 		return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
 	}
-	switch c.Role {
-	case "pod":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("pod role supports only pod selectors")
+
+	foundSelectorRoles := make(map[Role]struct{})
+	allowedSelectors := map[Role][]string{
+		RolePod:      {string(RolePod)},
+		RoleService:  {string(RoleService)},
+		RoleEndpoint: {string(RolePod), string(RoleService), string(RoleEndpoint)},
+		RoleNode:     {string(RoleNode)},
+		RoleIngress:  {string(RoleIngress)},
+	}
+
+	for _, selector := range c.Selectors {
+		if _, ok := foundSelectorRoles[selector.Role]; ok {
+			return errors.Errorf("duplicated selector role: %s", selector.Role)
 		}
-	case "service":
-		if len(c.Selectors.Pod.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("service role supports only service selectors")
+		foundSelectorRoles[selector.Role] = struct{}{}
+
+		if _, ok := allowedSelectors[c.Role]; !ok {
+			return errors.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, node or ingress", c.Role)
 		}
-	case "endpoints":
-		if len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("endpoints role supports only pod, service and endpoints selectors")
+		var allowed bool
+		for _, role := range allowedSelectors[c.Role] {
+			if role == string(selector.Role) {
+				allowed = true
+				break
+			}
 		}
-	case "node":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Pod.Field) > 0 {
-			return errors.Errorf("node role supports only node selectors")
+
+		if !allowed {
+			return errors.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
 		}
-	case "ingress":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Pod.Field) > 0 {
-			return errors.Errorf("ingress role supports only ingress selectors")
+
+		_, err := fields.ParseSelector(selector.Field)
+		if err != nil {
+			return err
+		}
+		_, err = fields.ParseSelector(selector.Label)
+		if err != nil {
+			return err
 		}
-	default:
-		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
 	}
 	return nil
 }
@@ -196,7 +223,7 @@ type Discovery struct {
 	logger             log.Logger
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers        []discoverer
-	selectors          RoleSelectorConfig
+	selectors          roleSelector
 }
 
 func (d *Discovery) getNamespaces() []string {
@@ -247,10 +274,34 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		role:               conf.Role,
 		namespaceDiscovery: &conf.NamespaceDiscovery,
 		discoverers:        make([]discoverer, 0),
-		selectors:          conf.Selectors,
+		selectors:          mapSelector(conf.Selectors),
 	}, nil
 }
 
+func mapSelector(rawSelector []SelectorConfig) roleSelector {
+	rs := roleSelector{}
+	for _, resourceSelectorRaw := range rawSelector {
+		switch resourceSelectorRaw.Role {
+		case RoleEndpoint:
+			rs.endpoints.field = resourceSelectorRaw.Field
+			rs.endpoints.label = resourceSelectorRaw.Label
+		case RoleIngress:
+			rs.ingress.field = resourceSelectorRaw.Field
+			rs.ingress.label = resourceSelectorRaw.Label
+		case RoleNode:
+			rs.node.field = resourceSelectorRaw.Field
+			rs.node.label = resourceSelectorRaw.Label
+		case RolePod:
+			rs.pod.field = resourceSelectorRaw.Field
+			rs.pod.label = resourceSelectorRaw.Label
+		case RoleService:
+			rs.service.field = resourceSelectorRaw.Field
+			rs.service.label = resourceSelectorRaw.Label
+		}
+	}
+	return rs
+}
+
 const resyncPeriod = 10 * time.Minute
 
 // Run implements the discoverer interface.
@@ -264,39 +315,39 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			e := d.client.CoreV1().Endpoints(namespace)
 			elw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Endpoints.Field
-					options.LabelSelector = d.selectors.Endpoints.Label
+					options.FieldSelector = d.selectors.endpoints.field
+					options.LabelSelector = d.selectors.endpoints.label
 					return e.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Endpoints.Field
-					options.LabelSelector = d.selectors.Endpoints.Label
+					options.FieldSelector = d.selectors.endpoints.field
+					options.LabelSelector = d.selectors.endpoints.label
 					return e.Watch(options)
 				},
 			}
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.Watch(options)
 				},
 			}
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(options)
 				},
 			}
@@ -316,13 +367,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(options)
 				},
 			}
@@ -338,13 +389,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.Watch(options)
 				},
 			}
@@ -360,13 +411,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			i := d.client.ExtensionsV1beta1().Ingresses(namespace)
 			ilw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Ingress.Field
-					options.LabelSelector = d.selectors.Ingress.Label
+					options.FieldSelector = d.selectors.ingress.field
+					options.LabelSelector = d.selectors.ingress.label
 					return i.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Ingress.Field
-					options.LabelSelector = d.selectors.Ingress.Label
+					options.FieldSelector = d.selectors.ingress.field
+					options.LabelSelector = d.selectors.ingress.label
 					return i.Watch(options)
 				},
 			}
@@ -380,13 +431,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		case RoleNode:
 			nlw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Node.Field
-					options.LabelSelector = d.selectors.Node.Label
+					options.FieldSelector = d.selectors.node.field
+					options.LabelSelector = d.selectors.node.label
 					return d.client.CoreV1().Nodes().List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Node.Field
-					options.LabelSelector = d.selectors.Node.Label
+					options.FieldSelector = d.selectors.node.field
+					options.LabelSelector = d.selectors.node.label
 					return d.client.CoreV1().Nodes().Watch(options)
 				},
 			}
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index d9e176df4..9cc7a159a 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -860,10 +860,17 @@ namespaces:
 # and https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ to learn more about the possible
 # filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
 # only support selectors matching the role itself (e.g. node role can only contain node selectors).
-selectors:
-  <role>:
+
+# Note: When making a decision about using field/label selectors, make sure that this
+# is the best approach - it will prevent Prometheus from reusing a single list/watch
+# for all scrape configs. This might result in a bigger load on the Kubernetes API,
+# because each selector combination requires an additional LIST/WATCH. On the other hand,
+# if you just want to monitor a small subset of pods in a large cluster, it is recommended to use selectors.
+# Whether or not to use selectors depends on the particular situation.
+[ selectors:
+  [ - role: <string>
     [ label: <string> ]
-    [ field: <string> ]
+    [ field: <string> ] ]]
 ```
 
 Where `<role>` must be `endpoints`, `service`, `pod`, `node`, or