From b4445ff03ff961c6b53290c3d6da03c585e85be2 Mon Sep 17 00:00:00 2001
From: Grebennikov Roman
Date: Thu, 3 Oct 2019 13:55:42 +0200
Subject: [PATCH] discovery/kubernetes: expose label_selector and
 field_selector

Closes #6096

Signed-off-by: Grebennikov Roman
---
 config/config_test.go                         | 28 ++++
 .../kubernetes_selectors_endpoints.bad.yml    | 14 ++++
 .../kubernetes_selectors_endpoints.good.yml   | 14 ++++
 .../kubernetes_selectors_ingress.bad.yml      |  8 +++
 .../kubernetes_selectors_ingress.good.yml     |  8 +++
 .../kubernetes_selectors_node.bad.yml         |  8 +++
 .../kubernetes_selectors_node.good.yml        |  8 +++
 .../testdata/kubernetes_selectors_pod.bad.yml |  8 +++
 .../kubernetes_selectors_pod.good.yml         |  8 +++
 .../kubernetes_selectors_service.bad.yml      |  8 +++
 .../kubernetes_selectors_service.good.yml     |  8 +++
 discovery/kubernetes/kubernetes.go            | 71 ++++++++++++++++++-
 docs/configuration/configuration.md           | 10 +++
 13 files changed, 198 insertions(+), 3 deletions(-)
 create mode 100644 config/testdata/kubernetes_selectors_endpoints.bad.yml
 create mode 100644 config/testdata/kubernetes_selectors_endpoints.good.yml
 create mode 100644 config/testdata/kubernetes_selectors_ingress.bad.yml
 create mode 100644 config/testdata/kubernetes_selectors_ingress.good.yml
 create mode 100644 config/testdata/kubernetes_selectors_node.bad.yml
 create mode 100644 config/testdata/kubernetes_selectors_node.good.yml
 create mode 100644 config/testdata/kubernetes_selectors_pod.bad.yml
 create mode 100644 config/testdata/kubernetes_selectors_pod.good.yml
 create mode 100644 config/testdata/kubernetes_selectors_service.bad.yml
 create mode 100644 config/testdata/kubernetes_selectors_service.good.yml

diff --git a/config/config_test.go b/config/config_test.go
index 4fbbded64..6150773e7 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -709,6 +709,19 @@ func TestKubernetesEmptyAPIServer(t *testing.T) {
 	testutil.Ok(t, err)
 }
 
+func TestKubernetesSelectors(t *testing.T) {
+	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml")
+	testutil.Ok(t, err)
+	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml")
+	testutil.Ok(t, err)
+	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml")
+	testutil.Ok(t, err)
+	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml")
+	testutil.Ok(t, err)
+	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml")
+	testutil.Ok(t, err)
+}
+
 var expectedErrors = []struct {
 	filename string
 	errMsg   string
@@ -791,6 +804,21 @@ var expectedErrors = []struct {
 	}, {
 		filename: "kubernetes_role.bad.yml",
 		errMsg:   "role",
+	}, {
+		filename: "kubernetes_selectors_endpoints.bad.yml",
+		errMsg:   "endpoints role supports only pod, service and endpoints selectors",
+	}, {
+		filename: "kubernetes_selectors_ingress.bad.yml",
+		errMsg:   "ingress role supports only ingress selectors",
+	}, {
+		filename: "kubernetes_selectors_node.bad.yml",
+		errMsg:   "node role supports only node selectors",
+	}, {
+		filename: "kubernetes_selectors_pod.bad.yml",
+		errMsg:   "pod role supports only pod selectors",
+	}, {
+		filename: "kubernetes_selectors_service.bad.yml",
+		errMsg:   "service role supports only service selectors",
 	}, {
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
diff --git a/config/testdata/kubernetes_selectors_endpoints.bad.yml b/config/testdata/kubernetes_selectors_endpoints.bad.yml
new file mode 100644
index 000000000..afd47e8f5
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_endpoints.bad.yml
@@ -0,0 +1,14 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+      node:
+        label: "foo=bar"
+        field: "metadata.status=Running"
+      service:
+        label: "foo=bar"
+        field: "metadata.status=Running"
+      endpoints:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_endpoints.good.yml b/config/testdata/kubernetes_selectors_endpoints.good.yml
new file mode 100644
index 000000000..25ecddfcd
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_endpoints.good.yml
@@ -0,0 +1,14 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+      pod:
+        label: "foo=bar"
+        field: "metadata.status=Running"
+      service:
+        label: "foo=bar"
+        field: "metadata.status=Running"
+      endpoints:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_ingress.bad.yml b/config/testdata/kubernetes_selectors_ingress.bad.yml
new file mode 100644
index 000000000..bcf815069
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_ingress.bad.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: ingress
+    selectors:
+      node:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_ingress.good.yml b/config/testdata/kubernetes_selectors_ingress.good.yml
new file mode 100644
index 000000000..b9955c522
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_ingress.good.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: ingress
+    selectors:
+      ingress:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_node.bad.yml b/config/testdata/kubernetes_selectors_node.bad.yml
new file mode 100644
index 000000000..0636e8f26
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_node.bad.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: node
+    selectors:
+      pod:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_node.good.yml b/config/testdata/kubernetes_selectors_node.good.yml
new file mode 100644
index 000000000..0ae1f16aa
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_node.good.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: node
+    selectors:
+      node:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_pod.bad.yml b/config/testdata/kubernetes_selectors_pod.bad.yml
new file mode 100644
index 000000000..afdfbfdea
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_pod.bad.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: pod
+    selectors:
+      node:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_pod.good.yml b/config/testdata/kubernetes_selectors_pod.good.yml
new file mode 100644
index 000000000..f27396511
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_pod.good.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: pod
+    selectors:
+      pod:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_service.bad.yml b/config/testdata/kubernetes_selectors_service.bad.yml
new file mode 100644
index 000000000..0ed850162
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_service.bad.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: service
+    selectors:
+      pod:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_service.good.yml b/config/testdata/kubernetes_selectors_service.good.yml
new file mode 100644
index 000000000..500834c81
--- /dev/null
+++ b/config/testdata/kubernetes_selectors_service.good.yml
@@ -0,0 +1,8 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: service
+    selectors:
+      service:
+        label: "foo=bar"
+        field: "metadata.status=Running"
diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go
index f8913c4e6..04b36a4f5 100644
--- a/discovery/kubernetes/kubernetes.go
+++ b/discovery/kubernetes/kubernetes.go
@@ -91,6 +91,20 @@ type SDConfig struct {
 	Role               Role                         `yaml:"role"`
 	HTTPClientConfig   config_util.HTTPClientConfig `yaml:",inline"`
 	NamespaceDiscovery NamespaceDiscovery           `yaml:"namespaces,omitempty"`
+	Selectors          RoleSelectorConfig           `yaml:"selectors,omitempty"`
+}
+
+type RoleSelectorConfig struct {
+	Node      ResourceSelectorConfig `yaml:"node,omitempty"`
+	Pod       ResourceSelectorConfig `yaml:"pod,omitempty"`
+	Service   ResourceSelectorConfig `yaml:"service,omitempty"`
+	Endpoints ResourceSelectorConfig `yaml:"endpoints,omitempty"`
+	Ingress   ResourceSelectorConfig `yaml:"ingress,omitempty"`
+}
+
+type ResourceSelectorConfig struct {
+	Label string `yaml:"label,omitempty"`
+	Field string `yaml:"field,omitempty"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -101,9 +115,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
-	if c.Role == "" {
-		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
-	}
 	err = c.HTTPClientConfig.Validate()
 	if err != nil {
 		return err
@@ -111,6 +122,30 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config_util.HTTPClientConfig{}) {
 		return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
 	}
+	switch c.Role {
+	case "pod":
+		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Service.Label) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Endpoints.Label) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Ingress.Label) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Node.Label) > 0 {
+			return errors.Errorf("pod role supports only pod selectors")
+		}
+	case "service":
+		if len(c.Selectors.Pod.Field) > 0 || len(c.Selectors.Pod.Label) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Endpoints.Label) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Ingress.Label) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Node.Label) > 0 {
+			return errors.Errorf("service role supports only service selectors")
+		}
+	case "endpoints":
+		if len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Ingress.Label) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Node.Label) > 0 {
+			return errors.Errorf("endpoints role supports only pod, service and endpoints selectors")
+		}
+	case "node":
+		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Service.Label) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Endpoints.Label) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Ingress.Label) > 0 || len(c.Selectors.Pod.Field) > 0 || len(c.Selectors.Pod.Label) > 0 {
+			return errors.Errorf("node role supports only node selectors")
+		}
+	case "ingress":
+		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Service.Label) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Endpoints.Label) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Node.Label) > 0 || len(c.Selectors.Pod.Field) > 0 || len(c.Selectors.Pod.Label) > 0 {
+			return errors.Errorf("ingress role supports only ingress selectors")
+		}
+	default:
+		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
+	}
 	return nil
 }
 
@@ -161,6 +196,7 @@ type Discovery struct {
 	logger             log.Logger
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers        []discoverer
+	selectors          RoleSelectorConfig
 }
 
 func (d *Discovery) getNamespaces() []string {
@@ -211,6 +247,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		role:               conf.Role,
 		namespaceDiscovery: &conf.NamespaceDiscovery,
 		discoverers:        make([]discoverer, 0),
+		selectors:          conf.Selectors,
 	}, nil
 }
 
@@ -227,27 +264,39 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			e := d.client.CoreV1().Endpoints(namespace)
 			elw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Endpoints.Field
+					options.LabelSelector = d.selectors.Endpoints.Label
 					return e.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Endpoints.Field
+					options.LabelSelector = d.selectors.Endpoints.Label
 					return e.Watch(options)
 				},
 			}
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Service.Field
+					options.LabelSelector = d.selectors.Service.Label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Service.Field
+					options.LabelSelector = d.selectors.Service.Label
 					return s.Watch(options)
 				},
 			}
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Pod.Field
+					options.LabelSelector = d.selectors.Pod.Label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Pod.Field
+					options.LabelSelector = d.selectors.Pod.Label
 					return p.Watch(options)
 				},
 			}
@@ -267,9 +316,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Pod.Field
+					options.LabelSelector = d.selectors.Pod.Label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Pod.Field
+					options.LabelSelector = d.selectors.Pod.Label
 					return p.Watch(options)
 				},
 			}
@@ -285,9 +338,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Service.Field
+					options.LabelSelector = d.selectors.Service.Label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Service.Field
+					options.LabelSelector = d.selectors.Service.Label
 					return s.Watch(options)
 				},
 			}
@@ -303,9 +360,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			i := d.client.ExtensionsV1beta1().Ingresses(namespace)
 			ilw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					options.FieldSelector = d.selectors.Ingress.Field
+					options.LabelSelector = d.selectors.Ingress.Label
 					return i.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.FieldSelector = d.selectors.Ingress.Field
+					options.LabelSelector = d.selectors.Ingress.Label
 					return i.Watch(options)
 				},
 			}
@@ -319,9 +380,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	case RoleNode:
 		nlw := &cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				options.FieldSelector = d.selectors.Node.Field
+				options.LabelSelector = d.selectors.Node.Label
 				return d.client.CoreV1().Nodes().List(options)
 			},
 			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				options.FieldSelector = d.selectors.Node.Field
+				options.LabelSelector = d.selectors.Node.Label
 				return d.client.CoreV1().Nodes().Watch(options)
 			},
 		}
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 413f6f26f..d9e176df4 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -854,6 +854,16 @@ tls_config:
 namespaces:
   names:
     [ - <string> ]
+
+# Optional label and field selectors to limit the discovery process to a subset of available resources.
+# See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
+# and https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ to learn more about the possible
+# filters that can be used. The endpoints role supports pod, service and endpoints selectors; other roles
+# only support selectors matching the role itself (e.g. the node role can only use node selectors).
+selectors:
+  <role>:
+    [ label: <string> ]
+    [ field: <string> ]
 ```
 
 Where `<role>` must be `endpoints`, `service`, `pod`, `node`, or `ingress`.
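
Note (not part of the commit itself): a minimal scrape configuration exercising the new `selectors` option might look like the sketch below. The job name and selector values are illustrative only; `status.phase=Running` is a standard Kubernetes field selector for pods, and any label/field selector expression accepted by the API server can be used.

    scrape_configs:
    - job_name: kubernetes-pods   # hypothetical job name
      kubernetes_sd_configs:
      - role: pod
        selectors:
          pod:
            # Discover only running pods that carry the app=frontend label.
            label: "app=frontend"
            field: "status.phase=Running"

Because the selectors are set on metav1.ListOptions inside the informers' List and Watch calls above, the filtering happens server-side: Prometheus receives and caches only the matching subset of objects instead of every resource in the watched namespaces.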